-rw-r--r--SECURITY.md24
-rw-r--r--bitbake/SECURITY.md24
-rwxr-xr-xbitbake/bin/bitbake-getvar48
-rw-r--r--bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst4
-rw-r--r--bitbake/lib/bb/__init__.py7
-rw-r--r--bitbake/lib/bb/command.py12
-rw-r--r--bitbake/lib/bb/cooker.py5
-rw-r--r--bitbake/lib/bb/data.py1
-rw-r--r--bitbake/lib/bb/fetch2/git.py27
-rw-r--r--bitbake/lib/bb/fetch2/wget.py22
-rw-r--r--bitbake/lib/bb/monitordisk.py7
-rw-r--r--bitbake/lib/bb/runqueue.py178
-rw-r--r--bitbake/lib/bb/siggen.py3
-rw-r--r--bitbake/lib/bb/tests/codeparser.py26
-rw-r--r--bitbake/lib/bb/tests/fetch.py58
-rw-r--r--bitbake/lib/bb/tinfoil.py4
-rw-r--r--bitbake/lib/bb/ui/knotty.py11
-rw-r--r--bitbake/lib/bb/utils.py45
-rw-r--r--bitbake/lib/bblayers/layerindex.py1
-rw-r--r--bitbake/lib/toaster/toastergui/api.py26
-rw-r--r--documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst15
-rw-r--r--documentation/conf.py1
-rw-r--r--documentation/dev-manual/dev-manual-common-tasks.rst74
-rw-r--r--documentation/dev-manual/dev-manual-start.rst19
-rw-r--r--documentation/kernel-dev/kernel-dev-common.rst2
-rw-r--r--documentation/overview-manual/overview-manual-concepts.rst4
-rw-r--r--documentation/overview-manual/overview-manual-yp-intro.rst2
-rw-r--r--documentation/poky.yaml10
-rw-r--r--documentation/profile-manual/profile-manual-usage.rst6
-rw-r--r--documentation/ref-manual/ref-classes.rst10
-rw-r--r--documentation/ref-manual/ref-features.rst2
-rw-r--r--documentation/ref-manual/ref-images.rst17
-rw-r--r--documentation/ref-manual/ref-release-process.rst2
-rw-r--r--documentation/ref-manual/ref-system-requirements.rst34
-rw-r--r--documentation/ref-manual/ref-variables.rst67
-rw-r--r--meta-poky/conf/distro/poky.conf21
-rw-r--r--meta-poky/conf/local.conf.sample2
-rw-r--r--meta-selftest/recipes-test/images/oe-selftest-image.bb2
-rw-r--r--meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend8
-rw-r--r--meta/classes/archiver.bbclass11
-rw-r--r--meta/classes/base.bbclass2
-rw-r--r--meta/classes/bin_package.bbclass3
-rw-r--r--meta/classes/create-spdx-2.2.bbclass1067
-rw-r--r--meta/classes/create-spdx.bbclass8
-rw-r--r--meta/classes/cve-check.bbclass323
-rw-r--r--meta/classes/devshell.bbclass2
-rw-r--r--meta/classes/externalsrc.bbclass25
-rw-r--r--meta/classes/fs-uuid.bbclass2
-rw-r--r--meta/classes/go.bbclass2
-rw-r--r--meta/classes/image.bbclass9
-rw-r--r--meta/classes/insane.bbclass8
-rw-r--r--meta/classes/kernel-arch.bbclass4
-rw-r--r--meta/classes/kernel-fitimage.bbclass155
-rw-r--r--meta/classes/kernel-yocto.bbclass10
-rw-r--r--meta/classes/kernel.bbclass62
-rw-r--r--meta/classes/libc-package.bbclass1
-rw-r--r--meta/classes/license.bbclass8
-rw-r--r--meta/classes/license_image.bbclass2
-rw-r--r--meta/classes/multilib.bbclass1
-rw-r--r--meta/classes/nativesdk.bbclass2
-rw-r--r--meta/classes/package.bbclass39
-rw-r--r--meta/classes/populate_sdk_base.bbclass4
-rw-r--r--meta/classes/populate_sdk_ext.bbclass7
-rw-r--r--meta/classes/pypi.bbclass2
-rw-r--r--meta/classes/qemuboot.bbclass3
-rw-r--r--meta/classes/rm_work.bbclass15
-rw-r--r--meta/classes/rootfs-postcommands.bbclass8
-rw-r--r--meta/classes/rootfsdebugfiles.bbclass2
-rw-r--r--meta/classes/sanity.bbclass8
-rw-r--r--meta/classes/sstate.bbclass2
-rw-r--r--meta/classes/staging.bbclass4
-rw-r--r--meta/classes/testimage.bbclass21
-rw-r--r--meta/classes/toolchain-scripts.bbclass4
-rw-r--r--meta/classes/uninative.bbclass4
-rw-r--r--meta/classes/useradd-staticids.bbclass2
-rw-r--r--meta/conf/bitbake.conf2
-rw-r--r--meta/conf/distro/include/cve-extra-exclusions.inc31
-rw-r--r--meta/conf/distro/include/maintainers.inc2
-rw-r--r--meta/conf/distro/include/ptest-packagelists.inc1
-rw-r--r--meta/conf/distro/include/yocto-uninative.inc10
-rw-r--r--meta/conf/licenses.conf15
-rw-r--r--meta/files/spdx-licenses.json5937
-rw-r--r--meta/lib/oe/cve_check.py133
-rw-r--r--meta/lib/oe/package_manager.py13
-rw-r--r--meta/lib/oe/packagedata.py11
-rw-r--r--meta/lib/oe/patch.py6
-rw-r--r--meta/lib/oe/reproducible.py3
-rw-r--r--meta/lib/oe/rootfs.py4
-rw-r--r--meta/lib/oe/sbom.py84
-rw-r--r--meta/lib/oe/spdx.py357
-rw-r--r--meta/lib/oe/sstatesig.py5
-rw-r--r--meta/lib/oe/terminal.py4
-rw-r--r--meta/lib/oeqa/core/target/ssh.py3
-rw-r--r--meta/lib/oeqa/runtime/cases/ltp.py2
-rw-r--r--meta/lib/oeqa/runtime/cases/rpm.py23
-rw-r--r--meta/lib/oeqa/runtime/cases/rtc.py8
-rw-r--r--meta/lib/oeqa/runtime/cases/scp.py2
-rw-r--r--meta/lib/oeqa/runtime/context.py4
-rw-r--r--meta/lib/oeqa/selftest/cases/bbtests.py8
-rw-r--r--meta/lib/oeqa/selftest/cases/cve_check.py178
-rw-r--r--meta/lib/oeqa/selftest/cases/devtool.py10
-rw-r--r--meta/lib/oeqa/selftest/cases/glibc.py6
-rw-r--r--meta/lib/oeqa/selftest/cases/oescripts.py3
-rw-r--r--meta/lib/oeqa/selftest/cases/prservice.py2
-rw-r--r--meta/lib/oeqa/selftest/cases/reproducible.py5
-rw-r--r--meta/lib/oeqa/selftest/cases/runtime_test.py16
-rw-r--r--meta/lib/oeqa/selftest/cases/tinfoil.py14
-rw-r--r--meta/lib/oeqa/utils/metadata.py6
-rw-r--r--meta/lib/oeqa/utils/nfs.py4
-rw-r--r--meta/lib/oeqa/utils/qemurunner.py11
-rw-r--r--meta/recipes-bsp/efivar/efivar_37.bb2
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2020-27749.patch609
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-20225.patch58
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-20233.patch50
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3695.patch178
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3696.patch46
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3697.patch82
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2021-3981.patch32
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-2601.patch87
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28733.patch60
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28734.patch67
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28735.patch271
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-28736.patch275
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2022-3775.patch97
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2023-4692.patch97
-rw-r--r--meta/recipes-bsp/grub/files/CVE-2023-4693.patch62
-rw-r--r--meta/recipes-bsp/grub/files/determinism.patch2
-rw-r--r--meta/recipes-bsp/grub/files/font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch117
-rw-r--r--meta/recipes-bsp/grub/grub2.inc18
-rw-r--r--meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb5
-rw-r--r--meta/recipes-connectivity/avahi/avahi.inc9
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch60
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch48
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch65
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch57
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch53
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch73
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch52
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch45
-rw-r--r--meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch109
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2022-2795.patch67
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2022-38177.patch31
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2022-38178.patch33
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2023-2828.patch166
-rw-r--r--meta/recipes-connectivity/bind/bind/CVE-2023-3341.patch175
-rw-r--r--meta/recipes-connectivity/bind/bind_9.11.37.bb5
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5.inc4
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2022-3637.patch39
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2022-39176.patch126
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch54
-rw-r--r--meta/recipes-connectivity/bluez5/bluez5_5.55.bb7
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch37
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2022-32293.patch266
-rw-r--r--meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch54
-rw-r--r--meta/recipes-connectivity/connman/connman_1.37.bb3
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2928.patch120
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2929.patch40
-rw-r--r--meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb2
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch283
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch254
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch54
-rw-r--r--meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb3
-rw-r--r--meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb4
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-01.patch189
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-02.patch581
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-03.patch171
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-04.patch34
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-05.patch194
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-06.patch73
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-07.patch125
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-08.patch315
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-09.patch38
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-10.patch39
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-11.patch307
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-12.patch120
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch468
-rw-r--r--meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch95
-rw-r--r--meta/recipes-connectivity/openssh/openssh/sshd.socket1
-rw-r--r--meta/recipes-connectivity/openssh/openssh/sshd@.service2
-rw-r--r--meta/recipes-connectivity/openssh/openssh_8.2p1.bb28
-rw-r--r--meta/recipes-connectivity/openssl/openssl/0001-Configure-add-2-missing-key-sorts.patch38
-rw-r--r--meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch37
-rw-r--r--meta/recipes-connectivity/openssl/openssl/CVE-2024-0727.patch122
-rw-r--r--meta/recipes-connectivity/openssl/openssl_1.1.1w.bb (renamed from meta/recipes-connectivity/openssl/openssl_1.1.1n.bb)5
-rw-r--r--meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch50
-rw-r--r--meta/recipes-connectivity/ppp/ppp_2.4.7.bb1
-rw-r--r--meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb2
-rw-r--r--meta/recipes-core/base-files/base-files/hosts2
-rw-r--r--meta/recipes-core/busybox/busybox.inc27
-rw-r--r--meta/recipes-core/busybox/busybox/CVE-2022-48174.patch82
-rw-r--r--meta/recipes-core/busybox/busybox_1.31.1.bb1
-rw-r--r--meta/recipes-core/coreutils/coreutils_8.31.bb1
-rw-r--r--meta/recipes-core/dbus/dbus-test_1.12.24.bb (renamed from meta/recipes-core/dbus/dbus-test_1.12.20.bb)0
-rw-r--r--meta/recipes-core/dbus/dbus.inc6
-rw-r--r--meta/recipes-core/dbus/dbus/CVE-2023-34969.patch96
-rw-r--r--meta/recipes-core/dbus/dbus_1.12.24.bb (renamed from meta/recipes-core/dbus/dbus_1.12.20.bb)0
-rw-r--r--meta/recipes-core/dropbear/dropbear.inc6
-rw-r--r--meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch145
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-40674.patch53
-rw-r--r--meta/recipes-core/expat/expat/CVE-2022-43680.patch33
-rw-r--r--meta/recipes-core/expat/expat_2.2.9.bb2
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch290
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch89
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch255
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch49
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch154
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch103
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch210
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch417
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch113
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch80
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch396
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch49
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch394
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch97
-rw-r--r--meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb14
-rw-r--r--meta/recipes-core/glibc/glibc-version.inc2
-rw-r--r--meta/recipes-core/glibc/glibc.inc4
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch26
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2023-0687.patch82
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2023-4813.patch986
-rw-r--r--meta/recipes-core/glibc/glibc/CVE-2023-4911.patch63
-rw-r--r--meta/recipes-core/glibc/glibc/check-test-wrapper2
-rw-r--r--meta/recipes-core/glibc/glibc_2.31.bb10
-rw-r--r--meta/recipes-core/images/build-appliance-image_15.0.0.bb2
-rwxr-xr-xmeta/recipes-core/initrdscripts/initramfs-framework/finish9
-rw-r--r--meta/recipes-core/initscripts/initscripts_1.0.bb2
-rw-r--r--meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch813
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2016-3709.patch89
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2021-3516.patch35
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-29824-dependent.patch53
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-29824.patch348
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch623
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch104
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch79
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch42
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch36
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch71
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-39615-pre.patch44
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch50
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch80
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2024-25062-pre1.patch38
-rw-r--r--meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch33
-rw-r--r--meta/recipes-core/libxml/libxml2_2.9.10.bb26
-rw-r--r--meta/recipes-core/meta/buildtools-tarball.bb2
-rw-r--r--meta/recipes-core/meta/cve-update-db-native.bb157
-rw-r--r--meta/recipes-core/meta/cve-update-nvd2-native.bb372
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2022-29458.patch135
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2023-29491.patch45
-rw-r--r--meta/recipes-core/ncurses/files/CVE-2023-50495.patch79
-rw-r--r--meta/recipes-core/ncurses/ncurses_6.2.bb5
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-Basetools-genffs-fix-gcc12-warning.patch49
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-Basetools-lzmaenc-fix-gcc12-warning.patch53
-rw-r--r--meta/recipes-core/ovmf/ovmf/0001-Basetools-turn-off-gcc12-warning.patch41
-rw-r--r--meta/recipes-core/ovmf/ovmf_git.bb3
-rw-r--r--meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb1
-rw-r--r--meta/recipes-core/psplash/files/psplash-start.service1
-rw-r--r--meta/recipes-core/psplash/files/psplash-systemd.service1
-rwxr-xr-xmeta/recipes-core/systemd/systemd-systemctl/systemctl22
-rw-r--r--meta/recipes-core/systemd/systemd/00-create-volatile.conf1
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2018-21029.patch120
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2022-3821.patch47
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-26604-1.patch115
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-26604-2.patch264
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-26604-3.patch182
-rw-r--r--meta/recipes-core/systemd/systemd/CVE-2023-26604-4.patch32
-rw-r--r--meta/recipes-core/systemd/systemd/systemd-pager.sh7
-rw-r--r--meta/recipes-core/systemd/systemd_244.5.bb18
-rw-r--r--meta/recipes-core/zlib/zlib/CVE-2022-37434.patch44
-rw-r--r--meta/recipes-core/zlib/zlib/CVE-2023-45853.patch40
-rw-r--r--meta/recipes-core/zlib/zlib_1.2.11.bb5
-rw-r--r--meta/recipes-devtools/binutils/binutils-2.34.inc12
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch4
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch80
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2021-46174.patch35
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-38533.patch37
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47007.patch32
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47008.patch64
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47010.patch34
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47011.patch31
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-47695.patch57
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch49
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2023-25584.patch530
-rw-r--r--meta/recipes-devtools/binutils/binutils/CVE-2023-25588.patch149
-rw-r--r--meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake8
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p1.patch236
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p2.patch198
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630.patch62
-rw-r--r--meta/recipes-devtools/dmidecode/dmidecode_3.2.bb3
-rw-r--r--meta/recipes-devtools/dpkg/dpkg_1.19.8.bb (renamed from meta/recipes-devtools/dpkg/dpkg_1.19.7.bb)4
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2022-1304.patch42
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest1
-rw-r--r--meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.7.bb9
-rw-r--r--meta/recipes-devtools/elfutils/elfutils_0.178.bb1
-rw-r--r--meta/recipes-devtools/elfutils/files/CVE-2021-33294.patch72
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.3/0001-Backport-fix-for-PR-tree-optimization-97236-fix-bad-.patch119
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.3/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch204
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.3/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch600
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.3/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch659
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.3/0040-fix-missing-dependencies-for-selftests.patch45
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5.inc (renamed from meta/recipes-devtools/gcc/gcc-9.3.inc)17
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0002-gcc-poison-system-directories.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0002-gcc-poison-system-directories.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch44
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0004-64-bit-multilib-hack.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0004-64-bit-multilib-hack.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0005-optional-libstdc.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0005-optional-libstdc.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0006-COLLECT_GCC_OPTIONS.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0006-COLLECT_GCC_OPTIONS.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0008-fortran-cross-compile-hack.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0008-fortran-cross-compile-hack.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0009-cpp-honor-sysroot.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0009-cpp-honor-sysroot.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0010-MIPS64-Default-to-N64-ABI.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0010-MIPS64-Default-to-N64-ABI.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0012-gcc-Fix-argument-list-too-long-error.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0012-gcc-Fix-argument-list-too-long-error.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0013-Disable-sdt.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0013-Disable-sdt.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0014-libtool.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0014-libtool.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0018-export-CPP.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0018-export-CPP.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0019-Ensure-target-gcc-headers-can-be-included.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0019-Ensure-target-gcc-headers-can-be-included.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0023-aarch64-Add-support-for-musl-ldso.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0023-aarch64-Add-support-for-musl-ldso.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0025-handle-sysroot-support-for-nativesdk-gcc.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0025-handle-sysroot-support-for-nativesdk-gcc.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0027-Fix-various-_FOR_BUILD-and-related-variables.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0027-Fix-various-_FOR_BUILD-and-related-variables.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0030-ldbl128-config.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0030-ldbl128-config.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0033-sync-gcc-stddef.h-with-musl.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0033-sync-gcc-stddef.h-with-musl.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0034-fix-segmentation-fault-in-precompiled-header-generat.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0034-fix-segmentation-fault-in-precompiled-header-generat.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0035-Fix-for-testsuite-failure.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0035-Fix-for-testsuite-failure.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0036-Re-introduce-spe-commandline-options.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0036-Re-introduce-spe-commandline-options.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch (renamed from meta/recipes-devtools/gcc/gcc-9.3/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-9.5/CVE-2023-4039.patch1506
-rw-r--r--meta/recipes-devtools/gcc/gcc-common.inc2
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross-canadian_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-cross-canadian_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-cross_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-cross_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-crosssdk_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-crosssdk_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-runtime_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-runtime_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-sanitizers_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-sanitizers_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc-shared-source.inc3
-rw-r--r--meta/recipes-devtools/gcc/gcc-source.inc1
-rw-r--r--meta/recipes-devtools/gcc/gcc-source_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc-source_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/gcc_9.5.bb (renamed from meta/recipes-devtools/gcc/gcc_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgcc-initial_9.5.bb (renamed from meta/recipes-devtools/gcc/libgcc-initial_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgcc_9.5.bb (renamed from meta/recipes-devtools/gcc/libgcc_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gcc/libgfortran_9.5.bb (renamed from meta/recipes-devtools/gcc/libgfortran_9.3.bb)0
-rw-r--r--meta/recipes-devtools/gdb/gdb-9.1.inc1
-rw-r--r--meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39128.patch75
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-23521.patch367
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-01.patch39
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-02.patch187
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-03.patch146
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-04.patch150
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-05.patch98
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-06.patch90
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-07.patch123
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-08.patch67
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-09.patch162
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-10.patch99
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-11.patch90
-rw-r--r--meta/recipes-devtools/git/files/CVE-2022-41903-12.patch124
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-22490-1.patch179
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-22490-2.patch122
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-22490-3.patch154
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-23946.patch184
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-25652.patch94
-rw-r--r--meta/recipes-devtools/git/files/CVE-2023-29007.patch159
-rw-r--r--meta/recipes-devtools/git/git.inc26
-rw-r--r--meta/recipes-devtools/go/go-1.14.inc93
-rw-r--r--meta/recipes-devtools/go/go-1.14/0001-CVE-2022-32190.patch74
-rw-r--r--meta/recipes-devtools/go/go-1.14/0002-CVE-2022-32190.patch48
-rw-r--r--meta/recipes-devtools/go/go-1.14/0003-CVE-2022-32190.patch36
-rw-r--r--meta/recipes-devtools/go/go-1.14/0004-CVE-2022-32190.patch82
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2020-29510.patch65
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-27918.patch191
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-31525.patch38
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-33195.patch373
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-33198.patch113
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-36221.patch101
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-39293.patch79
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-41771.patch86
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-44716.patch93
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2021-44717.patch83
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-1962.patch357
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-24675.patch271
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-24921.patch198
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-27664.patch68
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-28131.patch104
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-28327.patch36
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-2879.patch111
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-2880.patch164
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30629.patch47
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30631.patch116
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30632.patch71
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30633.patch131
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-30635.patch120
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-32148.patch49
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-32189.patch113
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41715.patch271
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41717.patch75
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41722-1.patch53
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41722-2.patch104
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41723.patch156
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre1.patch85
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre2.patch97
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre3.patch98
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2022-41725.patch660
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24534.patch200
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24536_1.patch134
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24536_2.patch184
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24536_3.patch349
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24537.patch76
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538-1.patch125
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538-2.patch635
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538_3.patch393
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538_4.patch497
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538_5.patch585
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24538_6.patch371
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24539.patch60
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-24540.patch90
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29400.patch94
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29402.patch201
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29404.patch84
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29405-1.patch112
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29405-2.patch38
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29406-1.patch212
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29406-2.patch114
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-29409.patch175
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-39318.patch262
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-39319.patch230
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-39326.patch181
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre1.patch393
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre2.patch401
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre3.patch86
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45287.patch1697
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45289.patch121
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2023-45290.patch271
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2024-24784.patch205
-rw-r--r--meta/recipes-devtools/go/go-1.14/CVE-2024-24785.patch197
-rw-r--r--meta/recipes-devtools/go/go-crosssdk.inc2
-rw-r--r--meta/recipes-devtools/go/go_1.14.bb4
-rw-r--r--meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch42
-rw-r--r--meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch104
-rw-r--r--meta/recipes-devtools/nasm/nasm_2.15.05.bb (renamed from meta/recipes-devtools/nasm/nasm_2.15.03.bb)5
-rw-r--r--meta/recipes-devtools/ninja/ninja_1.10.0.bb3
-rw-r--r--meta/recipes-devtools/opkg/opkg/0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch50
-rw-r--r--meta/recipes-devtools/opkg/opkg_0.4.2.bb5
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2023-31484.patch27
-rw-r--r--meta/recipes-devtools/perl/files/CVE-2023-47038.patch121
-rw-r--r--meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb1
-rw-r--r--meta/recipes-devtools/perl/perl_5.30.1.bb6
-rw-r--r--meta/recipes-devtools/python/python-setuptools.inc2
-rw-r--r--meta/recipes-devtools/python/python3-pip/CVE-2021-3572.patch48
-rw-r--r--meta/recipes-devtools/python/python3-pip_20.0.2.bb1
-rw-r--r--meta/recipes-devtools/python/python3-setuptools/CVE-2022-40897.patch29
-rw-r--r--meta/recipes-devtools/python/python3/CVE-2023-24329.patch80
-rw-r--r--meta/recipes-devtools/python/python3/python3-manifest.json4
-rw-r--r--meta/recipes-devtools/python/python3_3.8.18.bb (renamed from meta/recipes-devtools/python/python3_3.8.13.bb)9
-rw-r--r--meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb2
-rw-r--r--meta/recipes-devtools/qemu/qemu.inc70
-rw-r--r--meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch9
-rw-r--r--meta/recipes-devtools/qemu/qemu/9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch63
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13754-1.patch91
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13754-2.patch69
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13754-3.patch65
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-13754-4.patch39
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-1.patch50
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-2.patch69
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-3.patch49
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-4.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-5.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-6.patch61
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-7.patch50
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15469-8.patch44
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-15859.patch39
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch94
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-27821.patch73
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-35504.patch51
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2020-35505.patch45
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-20196.patch62
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-1.patch85
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-2.patch103
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-3.patch71
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-4.patch52
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3409-5.patch93
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3507.patch87
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch80
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3713.patch67
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch124
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3750.patch180
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch81
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-3930.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-4206.patch89
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2021-4207.patch43
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-0216-1.patch42
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-0216-2.patch52
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-26354.patch57
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch53
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch103
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch77
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch178
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch49
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch87
-rw-r--r--meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch114
-rw-r--r--meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch146
-rw-r--r--meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch55
-rw-r--r--meta/recipes-devtools/qemu/qemu/hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch236
-rw-r--r--meta/recipes-devtools/qemu/qemu_4.2.0.bb4
-rw-r--r--meta/recipes-devtools/quilt/quilt.inc1
-rw-r--r--meta/recipes-devtools/quilt/quilt/faildiff-order.patch41
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3521-01.patch60
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3521-02.patch55
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3521-03.patch34
-rw-r--r--meta/recipes-devtools/rpm/files/CVE-2021-3521.patch330
-rw-r--r--meta/recipes-devtools/rpm/rpm_4.14.2.1.bb4
-rw-r--r--meta/recipes-devtools/rsync/files/0001-Fix-relative-when-copying-an-absolute-path.patch31
-rw-r--r--meta/recipes-devtools/rsync/files/CVE-2022-29154.patch334
-rw-r--r--meta/recipes-devtools/rsync/rsync_3.1.3.bb2
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2021-33621.patch139
-rw-r--r--meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch61
-rw-r--r--meta/recipes-devtools/ruby/ruby_2.7.6.bb (renamed from meta/recipes-devtools/ruby/ruby_2.7.5.bb)10
-rw-r--r--meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service2
-rw-r--r--meta/recipes-devtools/subversion/subversion/CVE-2021-28544.patch146
-rw-r--r--meta/recipes-devtools/subversion/subversion_1.13.0.bb1
-rw-r--r--meta/recipes-devtools/valgrind/valgrind/remove-for-all1
-rw-r--r--meta/recipes-extended/bc/bc_1.07.1.bb2
-rw-r--r--meta/recipes-extended/cpio/cpio-2.13/0003-Fix-calculation-of-CRC-in-copy-out-mode.patch58
-rw-r--r--meta/recipes-extended/cpio/cpio-2.13/0004-Fix-appending-to-archives-bigger-than-2G.patch312
-rw-r--r--meta/recipes-extended/cpio/cpio_2.13.bb2
-rw-r--r--meta/recipes-extended/cups/cups.inc7
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2022-26691.patch33
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-32324.patch36
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-32360.patch31
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-34241.patch65
-rw-r--r--meta/recipes-extended/cups/cups/CVE-2023-4504.patch40
-rw-r--r--meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch28
-rw-r--r--meta/recipes-extended/gawk/gawk/remove-sensitive-tests.patch24
-rw-r--r--meta/recipes-extended/gawk/gawk_5.0.1.bb15
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch31
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2020-36773.patch109
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-28879.patch54
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-1.patch145
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-2.patch60
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-pre1.patch62
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch62
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch2
-rw-r--r--meta/recipes-extended/ghostscript/ghostscript_9.52.bb7
-rw-r--r--meta/recipes-extended/less/less/CVE-2022-48624.patch41
-rw-r--r--meta/recipes-extended/less/less_551.bb1
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-23177.patch183
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-01.patch23
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-02.patch172
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2022-26280.patch29
-rw-r--r--meta/recipes-extended/libarchive/libarchive/CVE-2022-36227.patch43
-rw-r--r--meta/recipes-extended/libarchive/libarchive_3.4.2.bb8
-rw-r--r--meta/recipes-extended/libnss-nis/libnss-nis.bb4
-rw-r--r--meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch155
-rw-r--r--meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb6
-rw-r--r--meta/recipes-extended/mdadm/files/CVE-2023-28736.patch77
-rw-r--r--meta/recipes-extended/mdadm/files/CVE-2023-28938.patch80
-rw-r--r--meta/recipes-extended/mdadm/mdadm_4.1.bb2
-rw-r--r--meta/recipes-extended/pam/libpam/CVE-2024-22365.patch59
-rw-r--r--meta/recipes-extended/pam/libpam_1.3.1.bb1
-rw-r--r--meta/recipes-extended/procps/procps/CVE-2023-4016.patch85
-rw-r--r--meta/recipes-extended/procps/procps_3.3.16.bb1
-rw-r--r--meta/recipes-extended/screen/screen/CVE-2023-24626.patch40
-rw-r--r--meta/recipes-extended/screen/screen_4.8.0.bb1
-rw-r--r--meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch66
-rw-r--r--meta/recipes-extended/shadow/files/CVE-2023-29383.patch54
-rw-r--r--meta/recipes-extended/shadow/files/CVE-2023-4641.patch146
-rw-r--r--meta/recipes-extended/shadow/shadow.inc3
-rw-r--r--meta/recipes-extended/shadow/shadow_4.8.1.bb4
-rw-r--r--meta/recipes-extended/sudo/files/CVE-2023-22809.patch113
-rw-r--r--meta/recipes-extended/sudo/sudo.inc2
-rw-r--r--meta/recipes-extended/sudo/sudo/CVE-2022-43995.patch59
-rw-r--r--meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-1.patch646
-rw-r--r--meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-2.patch26
-rw-r--r--meta/recipes-extended/sudo/sudo_1.8.32.bb4
-rw-r--r--meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch92
-rw-r--r--meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch46
-rw-r--r--meta/recipes-extended/sysstat/sysstat_12.2.1.bb5
-rw-r--r--meta/recipes-extended/tar/tar/CVE-2022-48303.patch43
-rw-r--r--meta/recipes-extended/tar/tar/CVE-2023-39804.patch64
-rw-r--r--meta/recipes-extended/tar/tar_1.32.bb4
-rw-r--r--meta/recipes-extended/timezone/timezone.inc7
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2021-4217.patch67
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch39
-rw-r--r--meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch33
-rw-r--r--meta/recipes-extended/unzip/unzip_6.0.bb3
-rw-r--r--meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch165
-rw-r--r--meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb1
-rw-r--r--meta/recipes-gnome/epiphany/epiphany_3.34.4.bb1
-rw-r--r--meta/recipes-gnome/epiphany/files/CVE-2022-29536.patch46
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-46829.patch61
-rw-r--r--meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb1
-rw-r--r--meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch21
-rw-r--r--meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch46
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2022-27404.patch33
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2022-27405.patch38
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2022-27406.patch31
-rw-r--r--meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch40
-rw-r--r--meta/recipes-graphics/freetype/freetype_2.10.1.bb4
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre0.patch335
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch135
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch179
-rw-r--r--meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb5
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2020-35538-1.patch457
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2020-35538-2.patch400
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2021-46822.patch133
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch97
-rw-r--r--meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch75
-rw-r--r--meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb5
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2/CVE-2021-33657.patch38
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2/CVE-2022-4743.patch38
-rw-r--r--meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb2
-rw-r--r--meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2022-0135.patch100
-rw-r--r--meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb1
-rw-r--r--meta/recipes-graphics/vulkan/assimp_5.0.1.bb2
-rw-r--r--meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch111
-rw-r--r--meta/recipes-graphics/wayland/wayland_1.18.0.bb1
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch58
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch38
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch111
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch63
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-1.patch42
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-2.patch46
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-1.patch52
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-2.patch64
-rw-r--r--meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb8
-rw-r--r--meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb (renamed from meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb)7
-rw-r--r--meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch34
-rw-r--r--meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb1
-rw-r--r--meta/recipes-graphics/xorg-lib/xorg-lib-common.inc3
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3550.patch40
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3551.patch64
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3553.patch49
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-4283.patch39
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46340.patch55
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46341.patch86
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46342.patch78
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46343.patch51
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46344.patch75
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-0494.patch38
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-1393.patch46
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch84
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch102
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch79
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch63
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch55
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch87
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch221
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch41
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch45
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch64
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch46
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch113
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch74
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch57
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch49
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch47
-rw-r--r--meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb29
-rw-r--r--meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb1
-rw-r--r--meta/recipes-kernel/cryptodev/files/fix-build-for-Linux-5.11-rc1.patch32
-rw-r--r--meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb3
-rw-r--r--meta/recipes-kernel/kmod/kmod/ptest.patch25
-rw-r--r--meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb (renamed from meta/recipes-kernel/linux-firmware/linux-firmware_20220411.bb)135
-rw-r--r--meta/recipes-kernel/linux/cve-exclusion.inc13
-rw-r--r--meta/recipes-kernel/linux/cve-exclusion_5.4.inc9445
-rwxr-xr-xmeta/recipes-kernel/linux/generate-cve-exclusions.py101
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-dev.bb2
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb6
-rw-r--r--meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb8
-rw-r--r--meta/recipes-kernel/linux/linux-yocto.inc3
-rw-r--r--meta/recipes-kernel/linux/linux-yocto_5.4.bb23
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0001-fix-strncpy-equals-destination-size-warning.patch42
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0002-fix-objtool-Rename-frame.h-objtool.h-v5.10.patch88
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0003-fix-btrfs-tracepoints-output-proper-root-owner-for-t.patch316
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0004-fix-btrfs-make-ordered-extent-tracepoint-take-btrfs_.patch179
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0005-fix-ext4-fast-commit-recovery-path-v5.10.patch91
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0006-fix-KVM-x86-Add-intr-vectoring-info-and-error-code-t.patch124
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0007-fix-kvm-x86-mmu-Add-TDP-MMU-PF-handler-v5.10.patch82
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0008-fix-KVM-x86-mmu-Return-unique-RET_PF_-values-if-the-.patch71
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0009-fix-tracepoint-Optimize-using-static_call-v5.10.patch155
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0010-fix-include-order-for-older-kernels.patch31
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0011-Add-release-maintainer-script.patch59
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0012-Improve-the-release-script.patch173
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0013-fix-backport-of-fix-ext4-fast-commit-recovery-path-v.patch32
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0014-Revert-fix-include-order-for-older-kernels.patch32
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0015-fix-backport-of-fix-tracepoint-Optimize-using-static.patch46
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0016-fix-adjust-version-range-for-trace_find_free_extent.patch30
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0017-fix-random-remove-unused-tracepoints-v5.18.patch46
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch45
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/0019-fix-random-tracepoints-removed-in-stable-kernels.patch51
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules/fix-jbd2-use-the-correct-print-format.patch147
-rw-r--r--meta/recipes-kernel/lttng/lttng-modules_2.11.9.bb (renamed from meta/recipes-kernel/lttng/lttng-modules_2.11.6.bb)24
-rw-r--r--meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb2
-rw-r--r--meta/recipes-kernel/perf/perf.bb4
-rw-r--r--meta/recipes-kernel/systemtap/systemtap/0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch49
-rw-r--r--meta/recipes-kernel/systemtap/systemtap_git.bb4
-rw-r--r--meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb (renamed from meta/recipes-kernel/wireless-regdb/wireless-regdb_2022.04.08.bb)4
-rw-r--r--meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb2
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-1475.patch36
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3109.patch41
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3341.patch67
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch136
-rw-r--r--meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb4
-rw-r--r--meta/recipes-multimedia/flac/files/CVE-2020-22219.patch197
-rw-r--r--meta/recipes-multimedia/flac/files/CVE-2021-0561.patch34
-rw-r--r--meta/recipes-multimedia/flac/flac_1.3.3.bb2
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1920.patch59
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1921.patch69
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1922-1923-1924-1925.patch214
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-2122.patch60
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb4
-rw-r--r--meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb9
-rw-r--r--meta/recipes-multimedia/libpng/files/run-ptest29
-rw-r--r--meta/recipes-multimedia/libpng/libpng_1.6.37.bb15
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-4156.patch30
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch46
-rw-r--r--meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb4
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0865.patch39
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0907.patch94
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0908.patch34
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0909.patch37
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-0924.patch58
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch183
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch159
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-34526.patch29
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-3570_3598.patch659
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-3597_3626_3627.patch123
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-3599.patch277
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-3970.patch45
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-40090.patch548
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2022-48281.patch26
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-0795_0796_0797_0798_0799.patch157
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-0800_0801_0802_0803_0804.patch135
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-1916.patch91
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-25433.patch173
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-25434-CVE-2023-25435.patch94
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-26965.patch90
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-26966.patch35
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-2908.patch33
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-3316.patch59
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-3576.patch35
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-3618.patch47
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-40745.patch34
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-41175.patch67
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-52356.patch53
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6228.patch30
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6277-1.patch191
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6277-2.patch152
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6277-3.patch46
-rw-r--r--meta/recipes-multimedia/libtiff/files/CVE-2023-6277-4.patch94
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch212
-rw-r--r--meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch62
-rw-r--r--meta/recipes-multimedia/libtiff/tiff_4.1.0.bb35
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-1999.patch55
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch366
-rw-r--r--meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch53
-rw-r--r--meta/recipes-multimedia/webp/libwebp_1.1.0.bb6
-rw-r--r--meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch135
-rw-r--r--meta/recipes-support/apr/apr-util_1.6.3.bb (renamed from meta/recipes-support/apr/apr-util_1.6.1.bb)8
-rw-r--r--meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch20
-rw-r--r--meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch58
-rw-r--r--meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch25
-rw-r--r--meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch63
-rw-r--r--meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch76
-rw-r--r--meta/recipes-support/apr/apr/CVE-2021-35940.patch58
-rw-r--r--meta/recipes-support/apr/apr/libtoolize_check.patch21
-rw-r--r--meta/recipes-support/apr/apr_1.7.2.bb (renamed from meta/recipes-support/apr/apr_1.7.0.bb)24
-rw-r--r--meta/recipes-support/bmap-tools/bmap-tools_3.5.bb2
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-22576.patch148
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27774-1.patch45
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27774-2.patch80
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27774-3.patch83
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27774-4.patch35
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27775.patch39
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27776.patch114
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27781.patch46
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27782-1.patch363
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-27782-2.patch71
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32206.patch52
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32207.patch284
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32208.patch72
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-32221.patch29
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-35252.patch72
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-35260.patch68
-rw-r--r--meta/recipes-support/curl/curl/CVE-2022-43552.patch82
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-23916.patch231
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27533.patch59
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27534-pre1.patch51
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27534.patch33
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch236
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27535.patch170
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27536.patch55
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-27538.patch31
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch197
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28320.patch86
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28321.patch272
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-28322.patch380
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-32001.patch38
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-38545.patch148
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-38546.patch132
-rw-r--r--meta/recipes-support/curl/curl/CVE-2023-46218.patch52
-rw-r--r--meta/recipes-support/curl/curl/CVE-2024-2398.patch88
-rw-r--r--meta/recipes-support/curl/curl_7.69.1.bb39
-rw-r--r--meta/recipes-support/gnupg/gnupg/CVE-2022-34903.patch44
-rw-r--r--meta/recipes-support/gnupg/gnupg_2.2.27.bb1
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2021-4209.patch37
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch282
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch85
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch206
-rw-r--r--meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch125
-rw-r--r--meta/recipes-support/gnutls/gnutls_3.6.14.bb5
-rw-r--r--meta/recipes-support/gnutls/libtasn1/CVE-2021-46848.patch45
-rw-r--r--meta/recipes-support/gnutls/libtasn1_4.16.0.bb1
-rw-r--r--meta/recipes-support/libbsd/libbsd_0.10.0.bb6
-rw-r--r--meta/recipes-support/libcap/files/CVE-2023-2602.patch52
-rw-r--r--meta/recipes-support/libcap/files/CVE-2023-2603.patch58
-rw-r--r--meta/recipes-support/libcap/libcap_2.32.bb2
-rw-r--r--meta/recipes-support/libksba/libksba/CVE-2022-3515.patch47
-rw-r--r--meta/recipes-support/libksba/libksba/CVE-2022-47629.patch69
-rw-r--r--meta/recipes-support/libksba/libksba_1.3.5.bb5
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-1586-regression.patch30
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-1586.patch59
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-1587.patch660
-rw-r--r--meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch74
-rw-r--r--meta/recipes-support/libpcre/libpcre2_10.34.bb4
-rw-r--r--meta/recipes-support/libxslt/libxslt/CVE-2021-30560.patch201
-rw-r--r--meta/recipes-support/libxslt/libxslt_1.1.34.bb5
-rw-r--r--meta/recipes-support/lz4/lz4_1.9.2.bb6
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2020-35525.patch21
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2020-35527.patch22
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2021-20223.patch23
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2022-35737.patch29
-rw-r--r--meta/recipes-support/sqlite/files/CVE-2023-7104.patch46
-rw-r--r--meta/recipes-support/sqlite/sqlite3_3.31.1.bb5
-rw-r--r--meta/recipes-support/vim/files/racefix.patch33
-rw-r--r--meta/recipes-support/vim/vim-tiny_9.0.bb (renamed from meta/recipes-support/vim/vim-tiny_8.2.bb)0
-rw-r--r--meta/recipes-support/vim/vim.inc37
-rw-r--r--meta/recipes-support/vim/vim_9.0.bb (renamed from meta/recipes-support/vim/vim_8.2.bb)0
-rwxr-xr-xscripts/create-pull-request2
-rwxr-xr-xscripts/git9
-rw-r--r--scripts/lib/buildstats.py4
-rw-r--r--scripts/lib/devtool/deploy.py8
-rw-r--r--scripts/lib/devtool/menuconfig.py2
-rw-r--r--scripts/lib/devtool/standard.py2
-rw-r--r--scripts/lib/recipetool/create.py4
-rw-r--r--scripts/lib/resulttool/report.py5
-rw-r--r--scripts/lib/resulttool/resultutils.py8
-rw-r--r--scripts/lib/wic/plugins/imager/direct.py2
-rw-r--r--scripts/lib/wic/plugins/source/bootimg-efi.py7
-rwxr-xr-xscripts/nativesdk-intercept/chgrp27
-rwxr-xr-xscripts/nativesdk-intercept/chown27
-rwxr-xr-xscripts/oe-depends-dot21
-rw-r--r--scripts/pybootchartgui/pybootchartgui/parsing.py2
-rwxr-xr-xscripts/relocate_sdk.py10
-rwxr-xr-xscripts/runqemu48
-rwxr-xr-xscripts/wic2
868 files changed, 81668 insertions, 4561 deletions
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000000..7d2ce1f631
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,24 @@
+How to Report a Potential Vulnerability?
+========================================
+
+If you would like to report a public issue (for example, one with a released
+CVE number), please report it using the
+[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
+If you have a patch ready, submit it following the same procedure as any other
+patch as described in README.md.
+
+If you are dealing with a not-yet released or urgent issue, please send a
+message to security AT yoctoproject DOT org, including as many details as
+possible: the layer or software module affected, the recipe and its version,
+and any example code, if available.
+
+Branches maintained with security fixes
+---------------------------------------
+
+See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
+for detailed info regarding the policies and maintenance of Stable branches.
+
+The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
+releases of the Yocto Project. Versions in grey are no longer actively maintained with
+security patches, but well-tested patches may still be accepted for them for
+significant issues.
diff --git a/bitbake/SECURITY.md b/bitbake/SECURITY.md
new file mode 100644
index 0000000000..7d2ce1f631
--- /dev/null
+++ b/bitbake/SECURITY.md
@@ -0,0 +1,24 @@
+How to Report a Potential Vulnerability?
+========================================
+
+If you would like to report a public issue (for example, one with a released
+CVE number), please report it using the
+[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
+If you have a patch ready, submit it following the same procedure as any other
+patch as described in README.md.
+
+If you are dealing with a not-yet released or urgent issue, please send a
+message to security AT yoctoproject DOT org, including as many details as
+possible: the layer or software module affected, the recipe and its version,
+and any example code, if available.
+
+Branches maintained with security fixes
+---------------------------------------
+
+See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
+for detailed info regarding the policies and maintenance of Stable branches.
+
+The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
+releases of the Yocto Project. Versions in grey are no longer actively maintained with
+security patches, but well-tested patches may still be accepted for them for
+significant issues.
diff --git a/bitbake/bin/bitbake-getvar b/bitbake/bin/bitbake-getvar
new file mode 100755
index 0000000000..9423219253
--- /dev/null
+++ b/bitbake/bin/bitbake-getvar
@@ -0,0 +1,48 @@
+#! /usr/bin/env python3
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import io
+import os
+import sys
+
+bindir = os.path.dirname(__file__)
+topdir = os.path.dirname(bindir)
+sys.path[0:0] = [os.path.join(topdir, 'lib')]
+
+import bb.tinfoil
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Bitbake Query Variable")
+ parser.add_argument("variable", help="variable name to query")
+ parser.add_argument("-r", "--recipe", help="Recipe name to query", default=None, required=False)
+ parser.add_argument('-u', '--unexpand', help='Do not expand the value (with --value)', action="store_true")
+ parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None)
+ parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true")
+ args = parser.parse_args()
+
+ if args.unexpand and not args.value:
+ print("--unexpand only makes sense with --value")
+ sys.exit(1)
+
+ if args.flag and not args.value:
+ print("--flag only makes sense with --value")
+ sys.exit(1)
+
+ with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
+ if args.recipe:
+ tinfoil.prepare(quiet=2)
+ d = tinfoil.parse_recipe(args.recipe)
+ else:
+ tinfoil.prepare(quiet=2, config_only=True)
+ d = tinfoil.config_data
+ if args.flag:
+ print(str(d.getVarFlag(args.variable, args.flag, expand=(not args.unexpand))))
+ elif args.value:
+ print(str(d.getVar(args.variable, expand=(not args.unexpand))))
+ else:
+ bb.data.emit_var(args.variable, d=d, all=True)
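The script above is a thin wrapper around the tinfoil API. A minimal sketch of the same query done directly from Python, assuming an already initialised build environment (the recipe and variable names below are placeholders, not part of the patch):

    import bb.tinfoil

    # Sketch only: query a variable the way bitbake-getvar does.
    # "busybox" and "SRC_URI" are placeholder examples.
    with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
        tinfoil.prepare(quiet=2)
        d = tinfoil.parse_recipe("busybox")
        print(d.getVar("SRC_URI"))                 # expanded value (--value)
        print(d.getVar("SRC_URI", expand=False))   # unexpanded value (--value --unexpand)
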
diff --git a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst
index 93ac18b78a..75e8dd69d9 100644
--- a/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst
+++ b/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst
@@ -405,8 +405,8 @@ This fetcher supports the following parameters:
- *"nobranch":* Tells the fetcher to not check the SHA validation for
the branch when set to "1". The default is "0". Set this option for
- the recipe that refers to the commit that is valid for a tag instead
- of the branch.
+ the recipe that refers to the commit that is valid for any namespace
+ (branch, tag, ...) instead of the branch.
- *"bareclone":* Tells the fetcher to clone a bare clone into the
destination directory without checking out a working tree. Only the
diff --git a/bitbake/lib/bb/__init__.py b/bitbake/lib/bb/__init__.py
index c98e23ce3e..ba8039497f 100644
--- a/bitbake/lib/bb/__init__.py
+++ b/bitbake/lib/bb/__init__.py
@@ -15,6 +15,13 @@ import sys
if sys.version_info < (3, 5, 0):
raise RuntimeError("Sorry, python 3.5.0 or later is required for this version of bitbake")
+if sys.version_info < (3, 10, 0):
+ # With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work"
+ # https://stackoverflow.com/questions/64797838/libgcc-s-so-1-must-be-installed-for-pthread-cancel-to-work
+ # https://bugs.ams1.psf.io/issue42888
+ # so ensure libgcc_s is loaded early on
+ import ctypes
+ libgcc_s = ctypes.CDLL('libgcc_s.so.1')
class BBHandledException(Exception):
"""
diff --git a/bitbake/lib/bb/command.py b/bitbake/lib/bb/command.py
index 98c945edb5..b8429b2773 100644
--- a/bitbake/lib/bb/command.py
+++ b/bitbake/lib/bb/command.py
@@ -20,6 +20,7 @@ Commands are queued in a CommandQueue
from collections import OrderedDict, defaultdict
+import io
import bb.event
import bb.cooker
import bb.remotedata
@@ -478,6 +479,17 @@ class CommandsSync:
d = command.remotedatastores[dsindex].varhistory
return getattr(d, method)(*args, **kwargs)
+ def dataStoreConnectorVarHistCmdEmit(self, command, params):
+ dsindex = params[0]
+ var = params[1]
+ oval = params[2]
+ val = params[3]
+ d = command.remotedatastores[params[4]]
+
+ o = io.StringIO()
+ command.remotedatastores[dsindex].varhistory.emit(var, oval, val, o, d)
+ return o.getvalue()
+
def dataStoreConnectorIncHistCmd(self, command, params):
dsindex = params[0]
method = params[1]
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index ac54d4378d..6743bce585 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -13,7 +13,6 @@ import sys, os, glob, os.path, re, time
import itertools
import logging
import multiprocessing
-import sre_constants
import threading
from io import StringIO, UnsupportedOperation
from contextlib import closing
@@ -1795,7 +1794,7 @@ class CookerCollectFiles(object):
try:
re.compile(mask)
bbmasks.append(mask)
- except sre_constants.error:
+ except re.error:
collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask)
# Then validate the combined regular expressions. This should never
@@ -1803,7 +1802,7 @@ class CookerCollectFiles(object):
bbmask = "|".join(bbmasks)
try:
bbmask_compiled = re.compile(bbmask)
- except sre_constants.error:
+ except re.error:
collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask)
bbmask = None
diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py
index b0683c5180..1d21e00a1c 100644
--- a/bitbake/lib/bb/data.py
+++ b/bitbake/lib/bb/data.py
@@ -301,6 +301,7 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
value += "\n_remove of %s" % r
deps |= r2.references
deps = deps | (keys & r2.execs)
+ value = handle_contains(value, r2.contains, d)
return value
if "vardepvalue" in varflags:
diff --git a/bitbake/lib/bb/fetch2/git.py b/bitbake/lib/bb/fetch2/git.py
index f6f6b63a74..cad1ae8207 100644
--- a/bitbake/lib/bb/fetch2/git.py
+++ b/bitbake/lib/bb/fetch2/git.py
@@ -44,7 +44,8 @@ Supported SRC_URI options are:
- nobranch
Don't check the SHA validation for branch. set this option for the recipe
- referring to commit which is valid in tag instead of branch.
+ referring to commit which is valid in any namespace (branch, tag, ...)
+ instead of branch.
The default is "0", set nobranch=1 if needed.
- usehead
@@ -63,6 +64,7 @@ import errno
import fnmatch
import os
import re
+import shlex
import subprocess
import tempfile
import bb
@@ -224,7 +226,12 @@ class Git(FetchMethod):
ud.shallow = False
if ud.usehead:
- ud.unresolvedrev['default'] = 'HEAD'
+ # When usehead is set let's associate 'HEAD' with the unresolved
+ # rev of this repository. This will get resolved into a revision
+ # later. If an actual revision happens to have also been provided
+ # then this setting will be overridden.
+ for name in ud.names:
+ ud.unresolvedrev[name] = 'HEAD'
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0"
@@ -347,7 +354,7 @@ class Git(FetchMethod):
# We do this since git will use a "-l" option automatically for local urls where possible
if repourl.startswith("file://"):
repourl = repourl[7:]
- clone_cmd = "LANG=C %s clone --bare --mirror \"%s\" %s --progress" % (ud.basecmd, repourl, ud.clonedir)
+ clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, shlex.quote(repourl), ud.clonedir)
if ud.proto.lower() != 'file':
bb.fetch2.check_network_access(d, clone_cmd, ud.url)
progresshandler = GitProgressHandler(d)
@@ -359,8 +366,12 @@ class Git(FetchMethod):
if "origin" in output:
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
- runfetchcmd("%s remote add --mirror=fetch origin \"%s\"" % (ud.basecmd, repourl), d, workdir=ud.clonedir)
- fetch_cmd = "LANG=C %s fetch -f --progress \"%s\" refs/*:refs/*" % (ud.basecmd, repourl)
+ runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=ud.clonedir)
+
+ if ud.nobranch:
+ fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
+ else:
+ fetch_cmd = "LANG=C %s fetch -f --progress %s refs/heads/*:refs/heads/* refs/tags/*:refs/tags/*" % (ud.basecmd, shlex.quote(repourl))
if ud.proto.lower() != 'file':
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
progresshandler = GitProgressHandler(d)
@@ -554,7 +565,7 @@ class Git(FetchMethod):
raise bb.fetch2.UnpackError("No up to date source found: " + "; ".join(source_error), ud.url)
repourl = self._get_repo_url(ud)
- runfetchcmd("%s remote set-url origin \"%s\"" % (ud.basecmd, repourl), d, workdir=destdir)
+ runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=destdir)
if self._contains_lfs(ud, d, destdir):
if need_lfs and not self._find_git_lfs(d):
@@ -682,8 +693,8 @@ class Git(FetchMethod):
d.setVar('_BB_GIT_IN_LSREMOTE', '1')
try:
repourl = self._get_repo_url(ud)
- cmd = "%s ls-remote \"%s\" %s" % \
- (ud.basecmd, repourl, search)
+ cmd = "%s ls-remote %s %s" % \
+ (ud.basecmd, shlex.quote(repourl), search)
if ud.proto.lower() != 'file':
bb.fetch2.check_network_access(d, cmd, repourl)
output = runfetchcmd(cmd, d, True)
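The quoting changes above pass the repository URL through shlex.quote() instead of wrapping it in double quotes, so shell metacharacters in a URL cannot be interpreted by the shell running the fetch command. A small illustration of the difference, using a contrived URL that is not from the patch:

    import shlex

    # Contrived, hostile-looking URL; inside double quotes the $(...) part
    # would still be executed by the shell, but shlex.quote() single-quotes
    # the whole string so it is passed as one literal argument.
    repourl = "https://example.com/$(touch /tmp/pwned).git"
    basecmd = "git -c core.fsyncobjectfiles=0"

    unsafe = 'LANG=C %s clone --bare --mirror "%s" /tmp/clone' % (basecmd, repourl)
    safe = "LANG=C %s clone --bare --mirror %s /tmp/clone" % (basecmd, shlex.quote(repourl))
    print(unsafe)
    print(safe)
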
diff --git a/bitbake/lib/bb/fetch2/wget.py b/bitbake/lib/bb/fetch2/wget.py
index 5676d3fd27..368c644337 100644
--- a/bitbake/lib/bb/fetch2/wget.py
+++ b/bitbake/lib/bb/fetch2/wget.py
@@ -52,6 +52,12 @@ class WgetProgressHandler(bb.progress.LineFilterProgressHandler):
class Wget(FetchMethod):
+
+ # CDNs like CloudFlare may do a 'browser integrity test' which can fail
+ # with the standard wget/urllib User-Agent, so pretend to be a modern
+ # browser.
+ user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0"
+
"""Class to fetch urls via 'wget'"""
def supports(self, ud, d):
"""
@@ -91,10 +97,9 @@ class Wget(FetchMethod):
fetchcmd = self.basecmd
- if 'downloadfilename' in ud.parm:
- localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile)
- bb.utils.mkdirhier(os.path.dirname(localpath))
- fetchcmd += " -O %s" % shlex.quote(localpath)
+ localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile) + ".tmp"
+ bb.utils.mkdirhier(os.path.dirname(localpath))
+ fetchcmd += " -O %s" % shlex.quote(localpath)
if ud.user and ud.pswd:
fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)
@@ -108,6 +113,10 @@ class Wget(FetchMethod):
self._runwget(ud, d, fetchcmd, False)
+ # Remove the ".tmp" and move the file into position atomically
+ # Our lock prevents multiple writers but mirroring code may grab incomplete files
+ os.rename(localpath, localpath[:-4])
+
# Sanity check since wget can pretend it succeed when it didn't
# Also, this used to happen if sourceforge sent us to the mirror page
if not os.path.exists(ud.localpath):
@@ -300,7 +309,7 @@ class Wget(FetchMethod):
# Some servers (FusionForge, as used on Alioth) require that the
# optional Accept header is set.
r.add_header("Accept", "*/*")
- r.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12")
+ r.add_header("User-Agent", self.user_agent)
def add_basic_auth(login_str, request):
'''Adds Basic auth to http request, pass in login:password as string'''
import base64
@@ -404,9 +413,8 @@ class Wget(FetchMethod):
"""
f = tempfile.NamedTemporaryFile()
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
- agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
fetchcmd = self.basecmd
- fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
+ fetchcmd += " -O " + f.name + " --user-agent='" + self.user_agent + "' '" + uri + "'"
try:
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
fetchresult = f.read()
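The fetcher change above always downloads to a ``.tmp`` name and renames it into place once wget has finished, so mirroring code cannot pick up a half-written file. The same pattern in isolation, sketched with urllib rather than the fetcher (the URL and paths are placeholders):

    import os
    import urllib.request

    def fetch_atomically(url, localpath):
        tmppath = localpath + ".tmp"
        os.makedirs(os.path.dirname(localpath), exist_ok=True)
        # Write the download to the temporary name first...
        with urllib.request.urlopen(url) as resp, open(tmppath, "wb") as out:
            out.write(resp.read())
        # ...then move it into place; os.rename() is atomic on the same
        # filesystem, so readers see either nothing or the complete file.
        os.rename(tmppath, localpath)
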
diff --git a/bitbake/lib/bb/monitordisk.py b/bitbake/lib/bb/monitordisk.py
index e7c07264a8..4d243af30b 100644
--- a/bitbake/lib/bb/monitordisk.py
+++ b/bitbake/lib/bb/monitordisk.py
@@ -229,9 +229,10 @@ class diskMonitor:
freeInode = st.f_favail
if minInode and freeInode < minInode:
- # Some filesystems use dynamic inodes so can't run out
- # (e.g. btrfs). This is reported by the inode count being 0.
- if st.f_files == 0:
+ # Some filesystems use dynamic inodes so can't run out.
+ # This is reported by the inode count being 0 (btrfs) or the free
+ # inode count being -1 (cephfs).
+ if st.f_files == 0 or st.f_favail == -1:
self.devDict[k][2] = None
continue
# Always show warning, the self.checked would always be False if the action is WARN
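The monitor change above treats a total inode count of 0 (btrfs) or a free inode count of -1 (cephfs) as "inodes cannot run out" and disables the inode check for that mount. Roughly the same probe outside BitBake, with an example path:

    import os

    st = os.statvfs("/tmp")  # example mount point
    # Filesystems with dynamic inodes report f_files == 0 (btrfs) or
    # f_favail == -1 (cephfs); an inode threshold is meaningless there.
    if st.f_files == 0 or st.f_favail == -1:
        print("dynamic inodes, skipping inode monitoring")
    else:
        print("free inodes:", st.f_favail)
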
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index a513b0983b..886eef1f27 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -24,6 +24,7 @@ import pickle
from multiprocessing import Process
import shlex
import pprint
+import time
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
@@ -142,6 +143,55 @@ class RunQueueScheduler(object):
self.buildable.append(tid)
self.rev_prio_map = None
+ self.is_pressure_usable()
+
+ def is_pressure_usable(self):
+ """
+ If pressure monitoring is enabled, check whether the pressure files can be opened and read, and
+ record the result in self.check_pressure. For example, openSUSE /proc/pressure/* files have readable
+ file permissions, but reading them returns the error EOPNOTSUPP (Operation not supported).
+ """
+ if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure:
+ try:
+ with open("/proc/pressure/cpu") as cpu_pressure_fds, \
+ open("/proc/pressure/io") as io_pressure_fds, \
+ open("/proc/pressure/memory") as memory_pressure_fds:
+
+ self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
+ self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
+ self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
+ self.prev_pressure_time = time.time()
+ self.check_pressure = True
+ except:
+ bb.note("The /proc/pressure files can't be read. Continuing build without monitoring pressure")
+ self.check_pressure = False
+ else:
+ self.check_pressure = False
+
+ def exceeds_max_pressure(self):
+ """
+ Monitor the difference in total pressure at least once per second, if
+ BB_PRESSURE_MAX_{CPU|IO|MEMORY} are set, return True if above threshold.
+ """
+ if self.check_pressure:
+ with open("/proc/pressure/cpu") as cpu_pressure_fds, \
+ open("/proc/pressure/io") as io_pressure_fds, \
+ open("/proc/pressure/memory") as memory_pressure_fds:
+ # extract "total" from /proc/pressure/{cpu|io}
+ curr_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
+ curr_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
+ curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
+ exceeds_cpu_pressure = self.rq.max_cpu_pressure and (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) > self.rq.max_cpu_pressure
+ exceeds_io_pressure = self.rq.max_io_pressure and (float(curr_io_pressure) - float(self.prev_io_pressure)) > self.rq.max_io_pressure
+ exceeds_memory_pressure = self.rq.max_memory_pressure and (float(curr_memory_pressure) - float(self.prev_memory_pressure)) > self.rq.max_memory_pressure
+ now = time.time()
+ if now - self.prev_pressure_time > 1.0:
+ self.prev_cpu_pressure = curr_cpu_pressure
+ self.prev_io_pressure = curr_io_pressure
+ self.prev_memory_pressure = curr_memory_pressure
+ self.prev_pressure_time = now
+ return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure)
+ return False
def next_buildable_task(self):
"""
@@ -155,6 +205,12 @@ class RunQueueScheduler(object):
if not buildable:
return None
+ # Bitbake requires that at least one task be active. Only check for pressure if
+ # this is the case, otherwise the pressure limitation could result in no tasks
+ # being active and no new tasks being started, which at times would break the scheduler.
+ if self.rq.stats.active and self.exceeds_max_pressure():
+ return None
+
# Filter out tasks that have a max number of threads that have been exceeded
skip_buildable = {}
for running in self.rq.runq_running.difference(self.rq.runq_complete):
@@ -1700,6 +1756,9 @@ class RunQueueExecute:
self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
+ self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU")
+ self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO")
+ self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY")
self.sq_buildable = set()
self.sq_running = set()
@@ -1735,6 +1794,29 @@ class RunQueueExecute:
if self.number_tasks <= 0:
bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
+ lower_limit = 1.0
+ upper_limit = 1000000.0
+ if self.max_cpu_pressure:
+ self.max_cpu_pressure = float(self.max_cpu_pressure)
+ if self.max_cpu_pressure < lower_limit:
+ bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_limit))
+ if self.max_cpu_pressure > upper_limit:
+ bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_CPU is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure))
+
+ if self.max_io_pressure:
+ self.max_io_pressure = float(self.max_io_pressure)
+ if self.max_io_pressure < lower_limit:
+ bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limit))
+ if self.max_io_pressure > upper_limit:
+ bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_IO is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
+
+ if self.max_memory_pressure:
+ self.max_memory_pressure = float(self.max_memory_pressure)
+ if self.max_memory_pressure < lower_limit:
+ bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit))
+ if self.max_memory_pressure > upper_limit:
+ bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
+
# List of setscene tasks which we've covered
self.scenequeue_covered = set()
# List of tasks which are covered (including setscene ones)
@@ -1893,6 +1975,20 @@ class RunQueueExecute:
self.setbuildable(revdep)
logger.debug(1, "Marking task %s as buildable", revdep)
+ found = None
+ for t in sorted(self.sq_deferred.copy()):
+ if self.sq_deferred[t] == task:
+ # Allow the next deferred task to run. Any other deferred tasks should be deferred after that task.
+ # We shouldn't allow all to run at once as it is prone to races.
+ if not found:
+ bb.note("Deferred task %s now buildable" % t)
+ del self.sq_deferred[t]
+ update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
+ found = t
+ else:
+ bb.note("Deferring %s after %s" % (t, found))
+ self.sq_deferred[t] = found
+
def task_complete(self, task):
self.stats.taskCompleted()
bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
@@ -2002,8 +2098,6 @@ class RunQueueExecute:
logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
self.sq_task_failoutright(nexttask)
return True
- else:
- self.sqdata.outrightfail.remove(nexttask)
if nexttask in self.sqdata.outrightfail:
logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
self.sq_task_failoutright(nexttask)
@@ -2154,7 +2248,8 @@ class RunQueueExecute:
if self.sq_deferred:
tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
- self.sq_task_failoutright(tid)
+ if tid not in self.runq_complete:
+ self.sq_task_failoutright(tid)
return True
if len(self.failed_tids) != 0:
@@ -2268,10 +2363,16 @@ class RunQueueExecute:
self.updated_taskhash_queue.remove((tid, unihash))
if unihash != self.rqdata.runtaskentries[tid].unihash:
- hashequiv_logger.verbose("Task %s unihash changed to %s" % (tid, unihash))
- self.rqdata.runtaskentries[tid].unihash = unihash
- bb.parse.siggen.set_unihash(tid, unihash)
- toprocess.add(tid)
+ # Make sure we rehash any other tasks with the same task hash that we're deferred against.
+ torehash = [tid]
+ for deftid in self.sq_deferred:
+ if self.sq_deferred[deftid] == tid:
+ torehash.append(deftid)
+ for hashtid in torehash:
+ hashequiv_logger.verbose("Task %s unihash changed to %s" % (hashtid, unihash))
+ self.rqdata.runtaskentries[hashtid].unihash = unihash
+ bb.parse.siggen.set_unihash(hashtid, unihash)
+ toprocess.add(hashtid)
# Work out all tasks which depend upon these
total = set()
@@ -2410,6 +2511,14 @@ class RunQueueExecute:
if update_tasks:
self.sqdone = False
+ for mc in sorted(self.sqdata.multiconfigs):
+ for tid in sorted([t[0] for t in update_tasks]):
+ if mc_from_tid(tid) != mc:
+ continue
+ h = pending_hash_index(tid, self.rqdata)
+ if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
+ self.sq_deferred[tid] = self.sqdata.hashes[h]
+ bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
for (tid, harddepfail, origvalid) in update_tasks:
@@ -2750,6 +2859,19 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
sqdata.stamppresent = set()
sqdata.valid = set()
+ sqdata.hashes = {}
+ sqrq.sq_deferred = {}
+ for mc in sorted(sqdata.multiconfigs):
+ for tid in sorted(sqdata.sq_revdeps):
+ if mc_from_tid(tid) != mc:
+ continue
+ h = pending_hash_index(tid, rqdata)
+ if h not in sqdata.hashes:
+ sqdata.hashes[h] = tid
+ else:
+ sqrq.sq_deferred[tid] = sqdata.hashes[h]
+ bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
+
update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)
def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True):
@@ -2761,6 +2883,8 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, s
sqdata.stamppresent.remove(tid)
if tid in sqdata.valid:
sqdata.valid.remove(tid)
+ if tid in sqdata.outrightfail:
+ sqdata.outrightfail.remove(tid)
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
@@ -2788,32 +2912,20 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, s
sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary)
- sqdata.hashes = {}
- sqrq.sq_deferred = {}
- for mc in sorted(sqdata.multiconfigs):
- for tid in sorted(sqdata.sq_revdeps):
- if mc_from_tid(tid) != mc:
- continue
- if tid in sqdata.stamppresent:
- continue
- if tid in sqdata.valid:
- continue
- if tid in sqdata.noexec:
- continue
- if tid in sqrq.scenequeue_notcovered:
- continue
- if tid in sqrq.scenequeue_covered:
- continue
-
- sqdata.outrightfail.add(tid)
-
- h = pending_hash_index(tid, rqdata)
- if h not in sqdata.hashes:
- sqdata.hashes[h] = tid
- else:
- sqrq.sq_deferred[tid] = sqdata.hashes[h]
- bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
-
+ for tid in tids:
+ if tid in sqdata.stamppresent:
+ continue
+ if tid in sqdata.valid:
+ continue
+ if tid in sqdata.noexec:
+ continue
+ if tid in sqrq.scenequeue_covered:
+ continue
+ if tid in sqrq.scenequeue_notcovered:
+ continue
+ if tid in sqrq.sq_deferred:
+ continue
+ sqdata.outrightfail.add(tid)
class TaskFailure(Exception):
"""
diff --git a/bitbake/lib/bb/siggen.py b/bitbake/lib/bb/siggen.py
index 26fa7f05ce..9d4f67aa90 100644
--- a/bitbake/lib/bb/siggen.py
+++ b/bitbake/lib/bb/siggen.py
@@ -318,7 +318,8 @@ class SignatureGeneratorBasic(SignatureGenerator):
else:
sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[tid]
- bb.utils.mkdirhier(os.path.dirname(sigfile))
+ with bb.utils.umask(0o002):
+ bb.utils.mkdirhier(os.path.dirname(sigfile))
data = {}
data['task'] = task
diff --git a/bitbake/lib/bb/tests/codeparser.py b/bitbake/lib/bb/tests/codeparser.py
index f485204791..f1c4f618d8 100644
--- a/bitbake/lib/bb/tests/codeparser.py
+++ b/bitbake/lib/bb/tests/codeparser.py
@@ -412,6 +412,32 @@ esac
# Check final value
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['anothervalue', 'yetanothervalue', 'lastone'])
+ def test_contains_vardeps_override_operators(self):
+ # Check override operators handle dependencies correctly with the contains functionality
+ expr_plain = 'testval'
+ expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} '
+ expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}'
+ expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}'
+ # Check dependencies
+ self.d.setVar('ANOTHERVAR', expr_plain)
+ self.d.prependVar('ANOTHERVAR', expr_prepend)
+ self.d.appendVar('ANOTHERVAR', expr_append)
+ self.d.setVar('ANOTHERVAR:remove', expr_remove)
+ self.d.setVar('TESTVAR1', 'blah')
+ self.d.setVar('TESTVAR2', 'testval2')
+ self.d.setVar('TESTVAR3', 'no-testval')
+ deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), self.d)
+ self.assertEqual(sorted(values.splitlines()),
+ sorted([
+ expr_prepend + expr_plain + expr_append,
+ '_remove of ' + expr_remove,
+ 'TESTVAR1{testval1} = Unset',
+ 'TESTVAR2{testval2} = Set',
+ 'TESTVAR3{no-testval} = Set',
+ ]))
+ # Check final value
+ self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2'])
+
#Currently no wildcard support
#def test_vardeps_wildcards(self):
# self.d.setVar("oe_libinstall", "echo test")
diff --git a/bitbake/lib/bb/tests/fetch.py b/bitbake/lib/bb/tests/fetch.py
index 301c468399..61dd5cccaf 100644
--- a/bitbake/lib/bb/tests/fetch.py
+++ b/bitbake/lib/bb/tests/fetch.py
@@ -650,6 +650,58 @@ class FetcherLocalTest(FetcherTest):
with self.assertRaises(bb.fetch2.UnpackError):
self.fetchUnpack(['file://a;subdir=/bin/sh'])
+ def test_local_gitfetch_usehead(self):
+ # Create dummy local Git repo
+ src_dir = tempfile.mkdtemp(dir=self.tempdir,
+ prefix='gitfetch_localusehead_')
+ src_dir = os.path.abspath(src_dir)
+ bb.process.run("git init", cwd=src_dir)
+ bb.process.run("git commit --allow-empty -m'Dummy commit'",
+ cwd=src_dir)
+ # Use other branch than master
+ bb.process.run("git checkout -b my-devel", cwd=src_dir)
+ bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
+ cwd=src_dir)
+ stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
+ orig_rev = stdout[0].strip()
+
+ # Fetch and check revision
+ self.d.setVar("SRCREV", "AUTOINC")
+ url = "git://" + src_dir + ";protocol=file;usehead=1"
+ fetcher = bb.fetch.Fetch([url], self.d)
+ fetcher.download()
+ fetcher.unpack(self.unpackdir)
+ stdout = bb.process.run("git rev-parse HEAD",
+ cwd=os.path.join(self.unpackdir, 'git'))
+ unpack_rev = stdout[0].strip()
+ self.assertEqual(orig_rev, unpack_rev)
+
+ def test_local_gitfetch_usehead_withname(self):
+ # Create dummy local Git repo
+ src_dir = tempfile.mkdtemp(dir=self.tempdir,
+ prefix='gitfetch_localusehead_')
+ src_dir = os.path.abspath(src_dir)
+ bb.process.run("git init", cwd=src_dir)
+ bb.process.run("git commit --allow-empty -m'Dummy commit'",
+ cwd=src_dir)
+ # Use other branch than master
+ bb.process.run("git checkout -b my-devel", cwd=src_dir)
+ bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
+ cwd=src_dir)
+ stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
+ orig_rev = stdout[0].strip()
+
+ # Fetch and check revision
+ self.d.setVar("SRCREV", "AUTOINC")
+ url = "git://" + src_dir + ";protocol=file;usehead=1;name=newName"
+ fetcher = bb.fetch.Fetch([url], self.d)
+ fetcher.download()
+ fetcher.unpack(self.unpackdir)
+ stdout = bb.process.run("git rev-parse HEAD",
+ cwd=os.path.join(self.unpackdir, 'git'))
+ unpack_rev = stdout[0].strip()
+ self.assertEqual(orig_rev, unpack_rev)
+
class FetcherNoNetworkTest(FetcherTest):
def setUp(self):
super().setUp()
@@ -1286,7 +1338,7 @@ class FetchCheckStatusTest(FetcherTest):
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz",
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.3.tar.gz",
"https://yoctoproject.org/",
- "https://yoctoproject.org/documentation",
+ "https://docs.yoctoproject.org/",
"http://downloads.yoctoproject.org/releases/opkg/opkg-0.1.7.tar.gz",
"http://downloads.yoctoproject.org/releases/opkg/opkg-0.3.0.tar.gz",
"ftp://sourceware.org/pub/libffi/libffi-1.20.tar.gz",
@@ -1698,7 +1750,7 @@ class GitShallowTest(FetcherTest):
self.add_empty_file('bsub', cwd=smdir)
self.git('submodule init', cwd=self.srcdir)
- self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
+ self.git('-c protocol.file.allow=always submodule add file://%s' % smdir, cwd=self.srcdir)
self.git('submodule update', cwd=self.srcdir)
self.git('commit -m submodule -a', cwd=self.srcdir)
@@ -1730,7 +1782,7 @@ class GitShallowTest(FetcherTest):
self.add_empty_file('bsub', cwd=smdir)
self.git('submodule init', cwd=self.srcdir)
- self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
+ self.git('-c protocol.file.allow=always submodule add file://%s' % smdir, cwd=self.srcdir)
self.git('submodule update', cwd=self.srcdir)
self.git('commit -m submodule -a', cwd=self.srcdir)
diff --git a/bitbake/lib/bb/tinfoil.py b/bitbake/lib/bb/tinfoil.py
index 28f1e5623f..8bec8cbaf6 100644
--- a/bitbake/lib/bb/tinfoil.py
+++ b/bitbake/lib/bb/tinfoil.py
@@ -53,6 +53,10 @@ class TinfoilDataStoreConnectorVarHistory:
def remoteCommand(self, cmd, *args, **kwargs):
return self.tinfoil.run_command('dataStoreConnectorVarHistCmd', self.dsindex, cmd, args, kwargs)
+ def emit(self, var, oval, val, o, d):
+ ret = self.tinfoil.run_command('dataStoreConnectorVarHistCmdEmit', self.dsindex, var, oval, val, d.dsindex)
+ o.write(ret)
+
def __getattr__(self, name):
if not hasattr(bb.data_smart.VariableHistory, name):
raise AttributeError("VariableHistory has no such method %s" % name)
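Together with the dataStoreConnectorVarHistCmdEmit command added earlier in this patch, the emit() wrapper lets bb.data.emit_var(..., all=True) print variable history from a tinfoil remote datastore, which is what the new bitbake-getvar script relies on. A rough usage sketch (the variable name is only an example):

    import sys
    import bb.data
    import bb.tinfoil

    with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
        tinfoil.prepare(quiet=2, config_only=True)
        d = tinfoil.config_data
        # all=True makes emit_var print the variable's history, which goes
        # through the emit() wrapper and the new command on the cooker side.
        bb.data.emit_var("DISTRO", o=sys.stdout, d=d, all=True)
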
diff --git a/bitbake/lib/bb/ui/knotty.py b/bitbake/lib/bb/ui/knotty.py
index e70c246400..d1f74389db 100644
--- a/bitbake/lib/bb/ui/knotty.py
+++ b/bitbake/lib/bb/ui/knotty.py
@@ -227,7 +227,9 @@ class TerminalFilter(object):
def keepAlive(self, t):
if not self.cuu:
- print("Bitbake still alive (%ds)" % t)
+ print("Bitbake still alive (no events for %ds). Active tasks:" % t)
+ for t in self.helper.running_tasks:
+ print(t)
sys.stdout.flush()
def updateFooter(self):
@@ -597,7 +599,8 @@ def main(server, eventHandler, params, tf = TerminalFilter):
warnings = 0
taskfailures = []
- printinterval = 5000
+ printintervaldelta = 10 * 60 # 10 minutes
+ printinterval = printintervaldelta
lastprint = time.time()
termfilter = tf(main, helper, console_handlers, params.options.quiet)
@@ -607,7 +610,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
try:
if (lastprint + printinterval) <= time.time():
termfilter.keepAlive(printinterval)
- printinterval += 5000
+ printinterval += printintervaldelta
event = eventHandler.waitEvent(0)
if event is None:
if main.shutdown > 1:
@@ -638,7 +641,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
if isinstance(event, logging.LogRecord):
lastprint = time.time()
- printinterval = 5000
+ printinterval = printintervaldelta
if event.levelno >= bb.msg.BBLogFormatter.ERROR:
errors = errors + 1
return_value = 1
diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py
index fab16ffc58..34fa0b7a67 100644
--- a/bitbake/lib/bb/utils.py
+++ b/bitbake/lib/bb/utils.py
@@ -421,12 +421,14 @@ def better_eval(source, locals, extraglobals = None):
return eval(source, ctx, locals)
@contextmanager
-def fileslocked(files):
+def fileslocked(files, *args, **kwargs):
"""Context manager for locking and unlocking file locks."""
locks = []
if files:
for lockfile in files:
- locks.append(bb.utils.lockfile(lockfile))
+ l = bb.utils.lockfile(lockfile, *args, **kwargs)
+ if l is not None:
+ locks.append(l)
try:
yield
@@ -459,9 +461,16 @@ def lockfile(name, shared=False, retry=True, block=False):
consider the possibility of sending a signal to the process to break
out - at which point you want block=True rather than retry=True.
"""
+ basename = os.path.basename(name)
+ if len(basename) > 255:
+ root, ext = os.path.splitext(basename)
+ basename = root[:255 - len(ext)] + ext
+
dirname = os.path.dirname(name)
mkdirhier(dirname)
+ name = os.path.join(dirname, basename)
+
if not os.access(dirname, os.W_OK):
logger.error("Unable to acquire lock '%s', directory is not writable",
name)
@@ -495,7 +504,7 @@ def lockfile(name, shared=False, retry=True, block=False):
return lf
lf.close()
except OSError as e:
- if e.errno == errno.EACCES:
+ if e.errno == errno.EACCES or e.errno == errno.ENAMETOOLONG:
logger.error("Unable to acquire lock '%s', %s",
e.strerror, name)
sys.exit(1)
@@ -960,6 +969,17 @@ def which(path, item, direction = 0, history = False, executable=False):
return "", hist
return ""
+@contextmanager
+def umask(new_mask):
+ """
+ Context manager to set the umask to a specific mask, and restore it afterwards.
+ """
+ current_mask = os.umask(new_mask)
+ try:
+ yield
+ finally:
+ os.umask(current_mask)
+
def to_boolean(string, default=None):
if not string:
return default
@@ -1561,21 +1581,22 @@ def set_process_name(name):
# export common proxies variables from datastore to environment
def export_proxies(d):
- import os
+ """ export common proxies variables from datastore to environment """
variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY',
'ftp_proxy', 'FTP_PROXY', 'no_proxy', 'NO_PROXY',
- 'GIT_PROXY_COMMAND']
+ 'GIT_PROXY_COMMAND', 'SSL_CERT_FILE', 'SSL_CERT_DIR']
exported = False
- for v in variables:
- if v in os.environ.keys():
+ origenv = d.getVar("BB_ORIGENV")
+
+ for name in variables:
+ value = d.getVar(name)
+ if not value and origenv:
+ value = origenv.getVar(name)
+ if value:
+ os.environ[name] = value
exported = True
- else:
- v_proxy = d.getVar(v)
- if v_proxy is not None:
- os.environ[v] = v_proxy
- exported = True
return exported
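The new bb.utils.umask() context manager is what the siggen change in this series uses to make the signature directory group-writable regardless of the caller's umask; the previous mask is restored on exit. A minimal usage sketch with a placeholder path:

    import bb.utils

    # Temporarily allow group write while creating the directory; the
    # caller's umask is restored when the with-block exits.
    with bb.utils.umask(0o002):
        bb.utils.mkdirhier("/tmp/example-sigdir")  # placeholder path
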
diff --git a/bitbake/lib/bblayers/layerindex.py b/bitbake/lib/bblayers/layerindex.py
index 95b67a6621..f64d18e819 100644
--- a/bitbake/lib/bblayers/layerindex.py
+++ b/bitbake/lib/bblayers/layerindex.py
@@ -206,6 +206,7 @@ class LayerIndexPlugin(ActionPlugin):
"""
args.show_only = True
args.ignore = []
+ args.shallow = True
self.do_layerindex_fetch(args)
def register_commands(self, sp):
diff --git a/bitbake/lib/toaster/toastergui/api.py b/bitbake/lib/toaster/toastergui/api.py
index b4cdc335ef..e367bd910e 100644
--- a/bitbake/lib/toaster/toastergui/api.py
+++ b/bitbake/lib/toaster/toastergui/api.py
@@ -11,7 +11,7 @@ import os
import re
import logging
import json
-import subprocess
+import glob
from collections import Counter
from orm.models import Project, ProjectTarget, Build, Layer_Version
@@ -227,20 +227,18 @@ class XhrSetDefaultImageUrl(View):
# same logical name
# * Each project that uses a layer will have its own
# LayerVersion and Project Layer for it
-# * During the Paroject delete process, when the last
+# * During the Project delete process, when the last
# LayerVersion for a 'local_source_dir' layer is deleted
# then the Layer record is deleted to remove orphans
#
def scan_layer_content(layer,layer_version):
# if this is a local layer directory, we can immediately scan its content
- if layer.local_source_dir:
+ if os.path.isdir(layer.local_source_dir):
try:
# recipes-*/*/*.bb
- cmd = '%s %s' % ('ls', os.path.join(layer.local_source_dir,'recipes-*/*/*.bb'))
- recipes_list = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read()
- recipes_list = recipes_list.decode("utf-8").strip()
- if recipes_list and 'No such' not in recipes_list:
+ recipes_list = glob.glob(os.path.join(layer.local_source_dir, 'recipes-*/*/*.bb'))
+ for recipe in recipes_list:
for recipe in recipes_list.split('\n'):
recipe_path = recipe[recipe.rfind('recipes-'):]
recipe_name = recipe[recipe.rfind('/')+1:].replace('.bb','')
@@ -260,6 +258,9 @@ def scan_layer_content(layer,layer_version):
except Exception as e:
logger.warning("ERROR:scan_layer_content: %s" % e)
+ else:
+ logger.warning("ERROR: wrong path given")
+ raise KeyError("local_source_dir")
class XhrLayer(View):
""" Delete, Get, Add and Update Layer information
@@ -456,15 +457,18 @@ class XhrLayer(View):
'layerdetailurl':
layer_dep.get_detailspage_url(project.pk)})
- # Scan the layer's content and update components
- scan_layer_content(layer,layer_version)
+ # Only scan_layer_content if layer is local
+ if layer_data.get('local_source_dir', None):
+ # Scan the layer's content and update components
+ scan_layer_content(layer,layer_version)
except Layer_Version.DoesNotExist:
return error_response("layer-dep-not-found")
except Project.DoesNotExist:
return error_response("project-not-found")
- except KeyError:
- return error_response("incorrect-parameters")
+ except KeyError as e:
+ _log("KeyError: %s" % e)
+ return error_response(f"incorrect-parameters")
return JsonResponse({'error': "ok",
'imported_layer': {
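The toaster change above replaces a shelled-out ``ls`` with glob.glob() when scanning a local layer for recipes. The core of that scan, sketched on its own with a placeholder layer path:

    import glob
    import os

    local_source_dir = "/srv/layers/meta-example"  # placeholder layer path

    if os.path.isdir(local_source_dir):
        for recipe in glob.glob(os.path.join(local_source_dir, "recipes-*/*/*.bb")):
            # Same fields the scan records: path starting at "recipes-*" and
            # the recipe name without the .bb suffix.
            recipe_path = recipe[recipe.rfind("recipes-"):]
            recipe_name = os.path.basename(recipe)[:-len(".bb")]
            print(recipe_name, recipe_path)
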
diff --git a/documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst b/documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst
index c9622d3647..6a44511af2 100644
--- a/documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst
+++ b/documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst
@@ -222,19 +222,10 @@ an entire Linux distribution, including the toolchain, from source.
.. tip::
You can significantly speed up your build and guard against fetcher
- failures by using mirrors. To use mirrors, add these lines to your
- local.conf file in the Build directory: ::
+ failures by using mirrors. To use mirrors, add this line to your
+ ``local.conf`` file in the :term:`Build Directory`: ::
- SSTATE_MIRRORS = "\
- file://.* http://sstate.yoctoproject.org/dev/PATH;downloadfilename=PATH \n \
- file://.* http://sstate.yoctoproject.org/&YOCTO_DOC_VERSION_MINUS_ONE;/PATH;downloadfilename=PATH \n \
- file://.* http://sstate.yoctoproject.org/&YOCTO_DOC_VERSION;/PATH;downloadfilename=PATH \n \
- "
-
-
- The previous examples showed how to add sstate paths for Yocto Project
- &YOCTO_DOC_VERSION_MINUS_ONE;, &YOCTO_DOC_VERSION;, and a development
- area. For a complete index of sstate locations, see http://sstate.yoctoproject.org/.
+ SSTATE_MIRRORS ?= "file://.* https://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
#. **Start the Build:** Continue with the following command to build an OS
image for the target, which is ``core-image-sato`` in this example:
diff --git a/documentation/conf.py b/documentation/conf.py
index df67a5cdf2..e9078e054e 100644
--- a/documentation/conf.py
+++ b/documentation/conf.py
@@ -97,6 +97,7 @@ extlinks = {
'yocto_git': ('https://git.yoctoproject.org%s', None),
'oe_home': ('https://www.openembedded.org%s', None),
'oe_lists': ('https://lists.openembedded.org%s', None),
+ 'oe_git': ('https://git.openembedded.org%s', None),
}
# Intersphinx config to use cross reference with Bitbake user manual
diff --git a/documentation/dev-manual/dev-manual-common-tasks.rst b/documentation/dev-manual/dev-manual-common-tasks.rst
index 159da6a019..d1dde6d0f3 100644
--- a/documentation/dev-manual/dev-manual-common-tasks.rst
+++ b/documentation/dev-manual/dev-manual-common-tasks.rst
@@ -2628,7 +2628,7 @@ Recipe Syntax
Understanding recipe file syntax is important for writing recipes. The
following list overviews the basic items that make up a BitBake recipe
file. For more complete BitBake syntax descriptions, see the
-":doc:`bitbake-user-manual/bitbake-user-manual-metadata`"
+":doc:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata`"
chapter of the BitBake User Manual.
- *Variable Assignments and Manipulations:* Variable assignments allow
@@ -3854,7 +3854,7 @@ Setting Up and Running a Multiple Configuration Build
To accomplish a multiple configuration build, you must define each
target's configuration separately using a parallel configuration file in
-the :term:`Build Directory`, and you
+the :term:`Build Directory` or configuration directory within a layer, and you
must follow a required file hierarchy. Additionally, you must enable the
multiple configuration builds in your ``local.conf`` file.
@@ -3862,47 +3862,47 @@ Follow these steps to set up and execute multiple configuration builds:
- *Create Separate Configuration Files*: You need to create a single
configuration file for each build target (each multiconfig).
- Minimally, each configuration file must define the machine and the
- temporary directory BitBake uses for the build. Suggested practice
- dictates that you do not overlap the temporary directories used
- during the builds. However, it is possible that you can share the
- temporary directory
- (:term:`TMPDIR`). For example,
- consider a scenario with two different multiconfigs for the same
+ The configuration definitions are implementation dependent, but often
+ each configuration file will define the machine and the
+ temporary directory BitBake uses for the build. Whether the same
+ temporary directory (:term:`TMPDIR`) can be shared depends on what is
+ similar and what is different between the configurations. Multiple MACHINE
+ targets can share the same :term:`TMPDIR` as long as the rest of the
+ configuration is the same, whereas multiple DISTRO settings would need
+ separate :term:`TMPDIR` directories.
+
+ For example, consider a scenario with two different multiconfigs for the same
:term:`MACHINE`: "qemux86" built
for two distributions such as "poky" and "poky-lsb". In this case,
- you might want to use the same ``TMPDIR``.
+ you would need to use different :term:`TMPDIR` directories.
Here is an example showing the minimal statements needed in a
configuration file for a "qemux86" target whose temporary build
- directory is ``tmpmultix86``:
- ::
+ directory is ``tmpmultix86``::
MACHINE = "qemux86"
TMPDIR = "${TOPDIR}/tmpmultix86"
The location for these multiconfig configuration files is specific.
- They must reside in the current build directory in a sub-directory of
- ``conf`` named ``multiconfig``. Following is an example that defines
+ They must reside in the current :term:`Build Directory` in a sub-directory of
+ ``conf`` named ``multiconfig`` or within a layer's ``conf`` directory
+ under a directory named ``multiconfig``. Following is an example that defines
two configuration files for the "x86" and "arm" multiconfigs:
.. image:: figures/multiconfig_files.png
:align: center
+ :width: 50%
- The reason for this required file hierarchy is because the ``BBPATH``
- variable is not constructed until the layers are parsed.
- Consequently, using the configuration file as a pre-configuration
- file is not possible unless it is located in the current working
- directory.
+ The usual :term:`BBPATH` search path is used to locate multiconfig files in
+ a similar way to other conf files.
- *Add the BitBake Multi-configuration Variable to the Local
Configuration File*: Use the
:term:`BBMULTICONFIG`
variable in your ``conf/local.conf`` configuration file to specify
each multiconfig. Continuing with the example from the previous
- figure, the ``BBMULTICONFIG`` variable needs to enable two
- multiconfigs: "x86" and "arm" by specifying each configuration file:
- ::
+ figure, the :term:`BBMULTICONFIG` variable needs to enable two
+ multiconfigs: "x86" and "arm" by specifying each configuration file::
BBMULTICONFIG = "x86 arm"
@@ -3916,13 +3916,11 @@ Follow these steps to set up and execute multiple configuration builds:
with "".
- *Launch BitBake*: Use the following BitBake command form to launch
- the multiple configuration build:
- ::
+ the multiple configuration build::
$ bitbake [mc:multiconfigname:]target [[[mc:multiconfigname:]target] ... ]
- For the example in this section, the following command applies:
- ::
+ For the example in this section, the following command applies::
$ bitbake mc:x86:core-image-minimal mc:arm:core-image-sato mc::core-image-base
@@ -3937,7 +3935,7 @@ Follow these steps to set up and execute multiple configuration builds:
Support for multiple configuration builds in the Yocto Project &DISTRO;
(&DISTRO_NAME;) Release does not include Shared State (sstate)
optimizations. Consequently, if a build uses the same object twice
- in, for example, two different ``TMPDIR``
+ in, for example, two different :term:`TMPDIR`
directories, the build either loads from an existing sstate cache for
that build at the start or builds the object fresh.
@@ -3958,38 +3956,34 @@ essentially that the
To enable dependencies in a multiple configuration build, you must
declare the dependencies in the recipe using the following statement
-form:
-::
+form::
task_or_package[mcdepends] = "mc:from_multiconfig:to_multiconfig:recipe_name:task_on_which_to_depend"
To better show how to use this statement, consider the example scenario
from the first paragraph of this section. The following statement needs
-to be added to the recipe that builds the ``core-image-sato`` image:
-::
+to be added to the recipe that builds the ``core-image-sato`` image::
do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_rootfs"
In this example, the `from_multiconfig` is "x86". The `to_multiconfig` is "arm". The
-task on which the ``do_image`` task in the recipe depends is the
-``do_rootfs`` task from the ``core-image-minimal`` recipe associated
+task on which the :ref:`ref-tasks-image` task in the recipe depends is the
+:ref:`ref-tasks-rootfs` task from the ``core-image-minimal`` recipe associated
with the "arm" multiconfig.
Once you set up this dependency, you can build the "x86" multiconfig
-using a BitBake command as follows:
-::
+using a BitBake command as follows::
$ bitbake mc:x86:core-image-sato
This command executes all the tasks needed to create the
``core-image-sato`` image for the "x86" multiconfig. Because of the
-dependency, BitBake also executes through the ``do_rootfs`` task for the
+dependency, BitBake also executes through the :ref:`ref-tasks-rootfs` task for the
"arm" multiconfig build.
Having a recipe depend on the root filesystem of another build might not
seem that useful. Consider this change to the statement in the
-``core-image-sato`` recipe:
-::
+``core-image-sato`` recipe::
do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_image"
@@ -4967,7 +4961,7 @@ configuration would be as follows:
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
- IMAGE_INSTALL_append = "lib32-glib-2.0"
+ IMAGE_INSTALL_append = " lib32-glib-2.0"
This example enables an additional library named
``lib32`` alongside the normal target packages. When combining these
@@ -8658,6 +8652,8 @@ In order to run tests, you need to do the following:
- Be sure to use an absolute path when calling this script
with sudo.
+ - Ensure that your host has the package ``iptables`` installed.
+
- The package recipe ``qemu-helper-native`` is required to run
this script. Build the package using the following command:
::
diff --git a/documentation/dev-manual/dev-manual-start.rst b/documentation/dev-manual/dev-manual-start.rst
index a85b86fbfb..6a330d4a32 100644
--- a/documentation/dev-manual/dev-manual-start.rst
+++ b/documentation/dev-manual/dev-manual-start.rst
@@ -659,7 +659,7 @@ Follow these steps to locate and download a particular tarball:
Using the Downloads Page
------------------------
-The :yocto_home:`Yocto Project Website <>` uses a "DOWNLOADS" page
+The :yocto_home:`Yocto Project Website <>` uses a "RELEASES" page
from which you can locate and download tarballs of any Yocto Project
release. Rather than Git repositories, these files represent snapshot
tarballs similar to the tarballs located in the Index of Releases
@@ -676,12 +676,13 @@ Releases <#accessing-index-of-releases>`__" section.
1. *Go to the Yocto Project Website:* Open The
:yocto_home:`Yocto Project Website <>` in your browser.
-2. *Get to the Downloads Area:* Select the "DOWNLOADS" item from the
- pull-down "SOFTWARE" tab menu near the top of the page.
+#. *Get to the Downloads Area:* Select the "RELEASES" item from the
+ pull-down "DEVELOPMENT" tab menu near the top of the page.
-3. *Select a Yocto Project Release:* Use the menu next to "RELEASE" to
- display and choose a recent or past supported Yocto Project release
- (e.g. &DISTRO_NAME_NO_CAP;, &DISTRO_NAME_NO_CAP_MINUS_ONE;, and so forth).
+#. *Select a Yocto Project Release:* The top of the "RELEASES" page shows the
currently supported releases, and past supported Yocto Project releases are
listed further down. The "Download" links in the rows of the table there
lead to the download tarballs for each release.
.. note::
@@ -691,9 +692,9 @@ Releases <#accessing-index-of-releases>`__" section.
You can use the "RELEASE ARCHIVE" link to reveal a menu of all Yocto
Project releases.
-4. *Download Tools or Board Support Packages (BSPs):* From the
- "DOWNLOADS" page, you can download tools or BSPs as well. Just scroll
- down the page and look for what you need.
+#. *Download Tools or Board Support Packages (BSPs):* Next to the tarballs you
will also find links to download tools or BSPs. Just select a Yocto Project
release and look for what you need.
Accessing Nightly Builds
------------------------
diff --git a/documentation/kernel-dev/kernel-dev-common.rst b/documentation/kernel-dev/kernel-dev-common.rst
index 830b3e88ca..6b5e9484d0 100644
--- a/documentation/kernel-dev/kernel-dev-common.rst
+++ b/documentation/kernel-dev/kernel-dev-common.rst
@@ -1100,7 +1100,7 @@ Section.
::
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
- SRC_URI_append = "file://0001-calibrate.c-Added-some-printk-statements.patch"
+ SRC_URI_append = " file://0001-calibrate.c-Added-some-printk-statements.patch"
The :term:`FILESEXTRAPATHS` and :term:`SRC_URI` statements
enable the OpenEmbedded build system to find the patch file.
diff --git a/documentation/overview-manual/overview-manual-concepts.rst b/documentation/overview-manual/overview-manual-concepts.rst
index d9f50e5194..3401f534b1 100644
--- a/documentation/overview-manual/overview-manual-concepts.rst
+++ b/documentation/overview-manual/overview-manual-concepts.rst
@@ -1986,9 +1986,7 @@ Behind the scenes, the shared state code works by looking in
shared state files. Here is an example:
::
- SSTATE_MIRRORS ?= "\
- file://.\* http://someserver.tld/share/sstate/PATH;downloadfilename=PATH \n \
- file://.\* file:///some/local/dir/sstate/PATH"
+ SSTATE_MIRRORS ?= "file://.* https://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
.. note::
diff --git a/documentation/overview-manual/overview-manual-yp-intro.rst b/documentation/overview-manual/overview-manual-yp-intro.rst
index 6dd10f2187..2675074f14 100644
--- a/documentation/overview-manual/overview-manual-yp-intro.rst
+++ b/documentation/overview-manual/overview-manual-yp-intro.rst
@@ -377,7 +377,7 @@ activities using the Yocto Project:
Index <http://layers.openembedded.org/layerindex/layers/>`__, which
is a website that indexes OpenEmbedded-Core layers.
-- *Patchwork:* `Patchwork <http://jk.ozlabs.org/projects/patchwork/>`__
+- *Patchwork:* `Patchwork <https://patchwork.yoctoproject.org/>`__
is a fork of a project originally started by
`OzLabs <http://ozlabs.org/>`__. The project is a web-based tracking
system designed to streamline the process of bringing contributions
diff --git a/documentation/poky.yaml b/documentation/poky.yaml
index 2538822c17..0ab046428b 100644
--- a/documentation/poky.yaml
+++ b/documentation/poky.yaml
@@ -1,13 +1,13 @@
-DISTRO : "3.1.16"
+DISTRO : "3.1.33"
DISTRO_NAME_NO_CAP : "dunfell"
DISTRO_NAME : "Dunfell"
DISTRO_NAME_NO_CAP_MINUS_ONE : "zeus"
-YOCTO_DOC_VERSION : "3.1.16"
+YOCTO_DOC_VERSION : "3.1.33"
YOCTO_DOC_VERSION_MINUS_ONE : "3.0.4"
-DISTRO_REL_TAG : "yocto-3.1.16"
-DOCCONF_VERSION : "3.1.16"
+DISTRO_REL_TAG : "yocto-3.1.33"
+DOCCONF_VERSION : "3.1.33"
BITBAKE_SERIES : "1.46"
-POKYVERSION : "23.0.16"
+POKYVERSION : "23.0.33"
YOCTO_POKY : "poky-&DISTRO_NAME_NO_CAP;-&POKYVERSION;"
YOCTO_DL_URL : "https://downloads.yoctoproject.org"
YOCTO_AB_URL : "https://autobuilder.yoctoproject.org"
diff --git a/documentation/profile-manual/profile-manual-usage.rst b/documentation/profile-manual/profile-manual-usage.rst
index 15cf1efe1c..e389a13fc0 100644
--- a/documentation/profile-manual/profile-manual-usage.rst
+++ b/documentation/profile-manual/profile-manual-usage.rst
@@ -1734,7 +1734,7 @@ events':
The tool is pretty self-explanatory, but for more detailed information
on navigating through the data, see the `kernelshark
-website <http://rostedt.homelinux.com/kernelshark/>`__.
+website <https://kernelshark.org/Documentation.html>`__.
.. _ftrace-documentation:
@@ -1765,8 +1765,8 @@ There is a nice series of articles on using ftrace and trace-cmd at LWN:
- `trace-cmd: A front-end for
Ftrace <https://lwn.net/Articles/410200/>`__
-There's more detailed documentation kernelshark usage here:
-`KernelShark <http://rostedt.homelinux.com/kernelshark/>`__
+See also `KernelShark's documentation <https://kernelshark.org/Documentation.html>`__
+for further usage details.
An amusing yet useful README (a tracing mini-HOWTO) can be found in
``/sys/kernel/debug/tracing/README``.
diff --git a/documentation/ref-manual/ref-classes.rst b/documentation/ref-manual/ref-classes.rst
index e657fe0e55..dea27eea88 100644
--- a/documentation/ref-manual/ref-classes.rst
+++ b/documentation/ref-manual/ref-classes.rst
@@ -1315,16 +1315,6 @@ The following list shows the tests you can list with the ``WARN_QA`` and
automatically get these versions. Consequently, you should only need
to explicitly add dependencies to binary driver recipes.
-.. _ref-classes-insserv:
-
-``insserv.bbclass``
-===================
-
-The ``insserv`` class uses the ``insserv`` utility to update the order
-of symbolic links in ``/etc/rc?.d/`` within an image based on
-dependencies specified by LSB headers in the ``init.d`` scripts
-themselves.
-
.. _ref-classes-kernel:
``kernel.bbclass``
diff --git a/documentation/ref-manual/ref-features.rst b/documentation/ref-manual/ref-features.rst
index f28ad2bb4c..be3a9e3a3e 100644
--- a/documentation/ref-manual/ref-features.rst
+++ b/documentation/ref-manual/ref-features.rst
@@ -63,6 +63,8 @@ Project metadata:
- *keyboard:* Hardware has a keyboard
+- *numa:* Hardware has non-uniform memory access
+
- *pcbios:* Support for booting through BIOS
- *pci:* Hardware has a PCI bus
diff --git a/documentation/ref-manual/ref-images.rst b/documentation/ref-manual/ref-images.rst
index 56ec8562f8..70feadf1ff 100644
--- a/documentation/ref-manual/ref-images.rst
+++ b/documentation/ref-manual/ref-images.rst
@@ -14,16 +14,17 @@ image you want.
Building an image without GNU General Public License Version 3
(GPLv3), GNU Lesser General Public License Version 3 (LGPLv3), and
the GNU Affero General Public License Version 3 (AGPL-3.0) components
- is only supported for minimal and base images. Furthermore, if you
- are going to build an image using non-GPLv3 and similarly licensed
- components, you must make the following changes in the ``local.conf``
- file before using the BitBake command to build the minimal or base
- image:
- ::
+ is only tested for the core-image-minimal image. Furthermore, if you would like to
+ build an image and verify that it does not include GPLv3 and similarly licensed
+ components, you must make the following change in the image recipe
+ file before using the BitBake command to build the image:
- 1. Comment out the EXTRA_IMAGE_FEATURES line
- 2. Set INCOMPATIBLE_LICENSE = "GPL-3.0 LGPL-3.0 AGPL-3.0"
+ INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0*"
+ Alternatively, you can adjust the ``local.conf`` file, repeating and adjusting the line
+ for all images where the license restriction must apply:
+
+ INCOMPATIBLE_LICENSE_pn-your-image-name = "GPL-3.0* LGPL-3.0*"
From within the ``poky`` Git repository, you can use the following
command to display the list of directories within the :term:`Source Directory`
diff --git a/documentation/ref-manual/ref-release-process.rst b/documentation/ref-manual/ref-release-process.rst
index a6d9ff60ec..8dcbea7beb 100644
--- a/documentation/ref-manual/ref-release-process.rst
+++ b/documentation/ref-manual/ref-release-process.rst
@@ -138,7 +138,7 @@ consists of the following pieces:
piece of software. The test allows the packages to be run within a
target image.
-- ``oe-selftest``: Tests combination BitBake invocations. These tests
+- ``oe-selftest``: Tests combinations of BitBake invocations. These tests
operate outside the OpenEmbedded build system itself. The
``oe-selftest`` can run all tests by default or can run selected
tests or test suites.
diff --git a/documentation/ref-manual/ref-system-requirements.rst b/documentation/ref-manual/ref-system-requirements.rst
index 109aa60d05..efb60e1009 100644
--- a/documentation/ref-manual/ref-system-requirements.rst
+++ b/documentation/ref-manual/ref-system-requirements.rst
@@ -34,16 +34,30 @@ and conceptual information in the :doc:`../overview-manual/overview-manual`.
Supported Linux Distributions
=============================
-Currently, the Yocto Project is supported on the following
-distributions:
+Currently, the &DISTRO; release ("&DISTRO_NAME;") of the Yocto Project is
+supported on the following distributions:
-- Ubuntu 16.04 (LTS)
+- Ubuntu 20.04 (LTS)
+
+- Ubuntu 22.04 (LTS)
+
+- Fedora 38
+
+- Debian GNU/Linux 11.x (Bullseye)
+
+- AlmaLinux 8
+
+The following distribution versions are still tested even though the
+organizations publishing them no longer make updates publicly available:
- Ubuntu 18.04 (LTS)
-- Ubuntu 19.04
+Finally, here are the distribution versions which were previously
+tested on former revisions of "&DISTRO_NAME;", but no longer are:
-- Ubuntu 20.04
+- Ubuntu 16.04 (LTS)
+
+- Ubuntu 19.04
- Fedora 28
@@ -61,24 +75,26 @@ distributions:
- Fedora 35
+- Fedora 36
+
+- Fedora 37
+
- CentOS 7.x
+- CentOS 8.x
+
- Debian GNU/Linux 8.x (Jessie)
- Debian GNU/Linux 9.x (Stretch)
- Debian GNU/Linux 10.x (Buster)
-- Debian GNU/Linux 11.x (Bullseye)
-
- OpenSUSE Leap 15.1
- OpenSUSE Leap 15.2
- OpenSUSE Leap 15.3
-- AlmaLinux 8.5
-
.. note::
- While the Yocto Project Team attempts to ensure all Yocto Project
diff --git a/documentation/ref-manual/ref-variables.rst b/documentation/ref-manual/ref-variables.rst
index db0cc2d66e..227c81fc39 100644
--- a/documentation/ref-manual/ref-variables.rst
+++ b/documentation/ref-manual/ref-variables.rst
@@ -3337,9 +3337,18 @@ system and gives an overview of their function and contents.
:term:`INCOMPATIBLE_LICENSE`
Specifies a space-separated list of license names (as they would
appear in :term:`LICENSE`) that should be excluded
- from the build. Recipes that provide no alternatives to listed
+ from the build (if set globally), or from an image (if set locally
+ in an image recipe).
+
+ When the variable is set globally, recipes that provide no alternatives to listed
incompatible licenses are not built. Packages that are individually
licensed with the specified incompatible licenses will be deleted.
+ Most of the time this does not result in a buildable configuration (because it becomes
+ impossible to satisfy build-time dependencies), so the recommended way to
+ implement license restrictions is to set the variable in specific
+ image recipes where the restrictions must apply. That way there
+ are no build time restrictions, but the license check is still
+ performed when the image's filesystem is assembled from packages.
.. note::
@@ -3846,10 +3855,10 @@ system and gives an overview of their function and contents.
::
KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc features/taskstats/taskstats.scc"
- KERNEL_FEATURES_append = "${KERNEL_EXTRA_FEATURES}"
- KERNEL_FEATURES_append_qemuall = "cfg/virtio.scc"
+ KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}"
+ KERNEL_FEATURES_append_qemuall = " cfg/virtio.scc"
KERNEL_FEATURES_append_qemux86 = " cfg/sound.scc cfg/paravirt_kvm.scc"
- KERNEL_FEATURES_append_qemux86-64 = "cfg/sound.scc"
+ KERNEL_FEATURES_append_qemux86-64 = " cfg/sound.scc"
:term:`KERNEL_FIT_LINK_NAME`
The link name of the kernel flattened image tree (FIT) image. This
@@ -4048,7 +4057,7 @@ system and gives an overview of their function and contents.
SRCREV_machine_core2-32-intel-common = "43b9eced9ba8a57add36af07736344dcc383f711"
KMACHINE_core2-32-intel-common = "intel-core2-32"
KBRANCH_core2-32-intel-common = "standard/base"
- KERNEL_FEATURES_append_core2-32-intel-common = "${KERNEL_FEATURES_INTEL_COMMON}"
+ KERNEL_FEATURES_append_core2-32-intel-common = " ${KERNEL_FEATURES_INTEL_COMMON}"
The ``KMACHINE`` statement says
that the kernel understands the machine name as "intel-core2-32".
@@ -7147,6 +7156,32 @@ system and gives an overview of their function and contents.
:term:`SSTATE_DIR`
The directory for the shared state cache.
+ :term:`SSTATE_EXCLUDEDEPS_SYSROOT`
+ This variable allows you to specify indirect dependencies to exclude
+ from sysroots, for example to avoid the situation where a dependency on
+ any ``-native`` recipe pulls all of that recipe's dependencies
+ into the recipe sysroot. This behaviour might not always be wanted,
+ for example when that ``-native`` recipe depends on build tools
+ that are not relevant for the current recipe.
+
+ This way, irrelevant dependencies are ignored, whereas they could
+ otherwise have prevented the reuse of prebuilt artifacts stored in
+ the Shared State Cache.
+
+ :term:`SSTATE_EXCLUDEDEPS_SYSROOT` is evaluated as a pair of regular
+ expressions describing the recipe and the dependency to ignore. An example
+ is the rule in :oe_git:`meta/conf/layer.conf </openembedded-core/tree/meta/conf/layer.conf>`::
+
+ # Nothing needs to depend on libc-initial
+ # base-passwd/shadow-sysroot don't need their dependencies
+ SSTATE_EXCLUDEDEPS_SYSROOT += "\
+ .*->.*-initial.* \
+ .*(base-passwd|shadow-sysroot)->.* \
+ "
+
+ The ``->`` substring represents the dependency between
+ the two regular expressions.
+
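As an illustration (not part of the patch above), the rule can be read as two
patterns joined by "->": the left-hand side matches the recipe, the right-hand
side matches the dependency to drop from its sysroot. A minimal Python sketch of
that reading, assuming a simple split on the "->" marker (the exact matching
logic lives in OE-Core and may differ in detail):

    import re

    rule = ".*->.*-initial.*"
    recipe_pattern, dep_pattern = rule.split("->", 1)

    def excluded(recipe, dep):
        # Both halves must match for the dependency to be ignored.
        return bool(re.match(recipe_pattern, recipe)) and bool(re.match(dep_pattern, dep))

    print(excluded("quilt-native", "libgcc-initial"))  # True
    print(excluded("quilt-native", "zlib-native"))     # False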
:term:`SSTATE_MIRROR_ALLOW_NETWORK`
If set to "1", allows fetches from mirrors that are specified in
:term:`SSTATE_MIRRORS` to work even when
@@ -7542,7 +7577,7 @@ system and gives an overview of their function and contents.
``SYSTEMD_BOOT_CFG`` as follows:
::
- SYSTEMD_BOOT_CFG ?= "${:term:`S`}/loader.conf"
+ SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
For information on Systemd-boot, see the `Systemd-boot
documentation <http://www.freedesktop.org/wiki/Software/systemd/systemd-boot/>`__.
@@ -8745,4 +8780,22 @@ system and gives an overview of their function and contents.
The default value of ``XSERVER``, if not specified in the machine
configuration, is "xserver-xorg xf86-video-fbdev xf86-input-evdev".
-
+
+ :term:`XZ_THREADS`
+ Specifies the number of parallel threads that should be used when
+ using xz compression.
+
+ By default this scales with core count, but is never set less than 2
+ to ensure that multi-threaded mode is always used so that the output
+ file contents are deterministic. Builds will work with a value of 1,
+ but the output will differ from the output generated when more than
+ one thread is used.
+
+ On systems where many tasks run in parallel, setting a limit to this
+ can be helpful in controlling system resource usage.
+
+ :term:`XZ_MEMLIMIT`
+ Specifies the maximum memory the xz compression should use as a percentage
+ of system memory. If unconstrained the xz compressor can use large amounts of
+ memory and become problematic with parallelism elsewhere in the build.
+ "50%" has been found to be a good value.
diff --git a/meta-poky/conf/distro/poky.conf b/meta-poky/conf/distro/poky.conf
index f8deac489b..25b0c8e608 100644
--- a/meta-poky/conf/distro/poky.conf
+++ b/meta-poky/conf/distro/poky.conf
@@ -1,6 +1,6 @@
DISTRO = "poky"
DISTRO_NAME = "Poky (Yocto Project Reference Distro)"
-DISTRO_VERSION = "3.1.16"
+DISTRO_VERSION = "3.1.33"
DISTRO_CODENAME = "dunfell"
SDK_VENDOR = "-pokysdk"
SDK_VERSION = "${@d.getVar('DISTRO_VERSION').replace('snapshot-${DATE}', 'snapshot')}"
@@ -43,26 +43,13 @@ SANITY_TESTED_DISTROS ?= " \
poky-2.7 \n \
poky-3.0 \n \
poky-3.1 \n \
- ubuntu-16.04 \n \
ubuntu-18.04 \n \
- ubuntu-19.04 \n \
ubuntu-20.04 \n \
- fedora-30 \n \
- fedora-31 \n \
- fedora-32 \n \
- fedora-33 \n \
- fedora-34 \n \
- fedora-35 \n \
- centos-7 \n \
- centos-8 \n \
- debian-8 \n \
- debian-9 \n \
- debian-10 \n \
+ ubuntu-22.04 \n \
+ fedora-37 \n \
debian-11 \n \
- opensuseleap-15.1 \n \
- opensuseleap-15.2 \n \
opensuseleap-15.3 \n \
- almalinux-8.5 \n \
+ almalinux-8.8 \n \
"
# add poky sanity bbclass
INHERIT += "poky-sanity"
diff --git a/meta-poky/conf/local.conf.sample b/meta-poky/conf/local.conf.sample
index b555f1d21e..ea37a801aa 100644
--- a/meta-poky/conf/local.conf.sample
+++ b/meta-poky/conf/local.conf.sample
@@ -231,7 +231,7 @@ BB_DISKMON_DIRS ??= "\
# present in the cache. It assumes you can download something faster than you can build it
# which will depend on your network.
#
-#SSTATE_MIRRORS ?= "file://.* http://sstate.yoctoproject.org/2.5/PATH;downloadfilename=PATH"
+#SSTATE_MIRRORS ?= "file://.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
#
# Qemu configuration
diff --git a/meta-selftest/recipes-test/images/oe-selftest-image.bb b/meta-selftest/recipes-test/images/oe-selftest-image.bb
index 5d4d10eef6..6246aae910 100644
--- a/meta-selftest/recipes-test/images/oe-selftest-image.bb
+++ b/meta-selftest/recipes-test/images/oe-selftest-image.bb
@@ -1,6 +1,6 @@
SUMMARY = "An image used during oe-selftest tests"
-IMAGE_INSTALL = "packagegroup-core-boot dropbear"
+IMAGE_INSTALL = "packagegroup-core-boot packagegroup-core-ssh-dropbear"
IMAGE_FEATURES = "debug-tweaks"
IMAGE_LINGUAS = " "
diff --git a/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend b/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend
index b2824cbb1d..fbe039aa95 100644
--- a/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend
+++ b/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend
@@ -7,8 +7,8 @@ KMACHINE_genericx86 ?= "common-pc"
KMACHINE_genericx86-64 ?= "common-pc-64"
KMACHINE_beaglebone-yocto ?= "beaglebone"
-SRCREV_machine_genericx86 ?= "e2020dbe2ccaef50d7e8f37a5bf08c68a006a064"
-SRCREV_machine_genericx86-64 ?= "e2020dbe2ccaef50d7e8f37a5bf08c68a006a064"
+SRCREV_machine_genericx86 ?= "35826e154ee014b64ccfa0d1f12d36b8f8a75939"
+SRCREV_machine_genericx86-64 ?= "35826e154ee014b64ccfa0d1f12d36b8f8a75939"
SRCREV_machine_edgerouter ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd"
SRCREV_machine_beaglebone-yocto ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd"
@@ -17,7 +17,7 @@ COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64"
COMPATIBLE_MACHINE_edgerouter = "edgerouter"
COMPATIBLE_MACHINE_beaglebone-yocto = "beaglebone-yocto"
-LINUX_VERSION_genericx86 = "5.4.178"
-LINUX_VERSION_genericx86-64 = "5.4.178"
+LINUX_VERSION_genericx86 = "5.4.219"
+LINUX_VERSION_genericx86-64 = "5.4.219"
LINUX_VERSION_edgerouter = "5.4.58"
LINUX_VERSION_beaglebone-yocto = "5.4.58"
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 9ef18ebd3c..6ead010fe1 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -54,9 +54,10 @@ ARCHIVER_MODE[mirror] ?= "split"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
-ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_ARCH = "${TARGET_SYS}"
+ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
-ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
# When producing a combined mirror directory, allow duplicates for the case
@@ -100,6 +101,10 @@ python () {
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
return
+ # TARGET_SYS in ARCHIVER_ARCH will break the stamp for gcc-source in multiconfig
+ if pn.startswith('gcc-source'):
+ d.setVar('ARCHIVER_ARCH', "allarch")
+
def hasTask(task):
return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
@@ -578,7 +583,7 @@ python do_dumpdata () {
SSTATETASKS += "do_deploy_archives"
do_deploy_archives () {
- echo "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
+ bbnote "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
}
python do_deploy_archives_setscene () {
sstate_setscene(d)
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 19604a4646..3cae577a0e 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -139,7 +139,7 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
# /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
# would return /usr/local/bin/ccache/gcc, but what we need is
# /usr/bin/gcc, this code can check and fix that.
- if "ccache" in srctool:
+ if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
srctool = bb.utils.which(path, tool, executable=True, direction=1)
if srctool:
os.symlink(srctool, desttool)
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index cbc9b1fa13..c1954243ee 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -30,8 +30,9 @@ bin_package_do_install () {
bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
fi
cd ${S}
+ install -d ${D}${base_prefix}
tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
- | tar --no-same-owner -xpf - -C ${D}
+ | tar --no-same-owner -xpf - -C ${D}${base_prefix}
}
FILES_${PN} = "/"
diff --git a/meta/classes/create-spdx-2.2.bbclass b/meta/classes/create-spdx-2.2.bbclass
new file mode 100644
index 0000000000..42b693d586
--- /dev/null
+++ b/meta/classes/create-spdx-2.2.bbclass
@@ -0,0 +1,1067 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx/${MACHINE}"
+
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
+SPDXDIR ??= "${WORKDIR}/spdx"
+SPDXDEPLOY = "${SPDXDIR}/deploy"
+SPDXWORK = "${SPDXDIR}/work"
+SPDXIMAGEWORK = "${SPDXDIR}/image-work"
+SPDXSDKWORK = "${SPDXDIR}/sdk-work"
+
+SPDX_TOOL_NAME ??= "oe-spdx-creator"
+SPDX_TOOL_VERSION ??= "1.0"
+
+SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
+
+SPDX_INCLUDE_SOURCES ??= "0"
+SPDX_ARCHIVE_SOURCES ??= "0"
+SPDX_ARCHIVE_PACKAGED ??= "0"
+
+SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
+SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+SPDX_PRETTY ??= "0"
+
+SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
+
+SPDX_CUSTOM_ANNOTATION_VARS ??= ""
+
+SPDX_ORG ??= "OpenEmbedded ()"
+SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
+SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
+ this recipe. For SPDX documents created using this class during the build, this \
+ is the contact information for the person or organization who is doing the \
+ build."
+
+def extract_licenses(filename):
+ import re
+
+ lic_regex = re.compile(rb'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
+
+ try:
+ with open(filename, 'rb') as f:
+ size = min(15000, os.stat(filename).st_size)
+ txt = f.read(size)
+ licenses = re.findall(lic_regex, txt)
+ if licenses:
+ ascii_licenses = [lic.decode('ascii') for lic in licenses]
+ return ascii_licenses
+ except Exception as e:
+ bb.warn(f"Exception reading {filename}: {e}")
+ return None
+
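As a brief illustration (not part of the patch), the scan above reads at most the
first 15000 bytes of each file and returns any SPDX-License-Identifier tags it
finds. The same regular expression applied to an in-memory sample (illustrative
input only):

    import re

    lic_regex = re.compile(rb'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
    sample = b"/* SPDX-License-Identifier: GPL-2.0-only */\nint main(void) { return 0; }\n"
    print([m.decode("ascii") for m in lic_regex.findall(sample)])  # ['GPL-2.0-only']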
+def get_doc_namespace(d, doc):
+ import uuid
+ namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
+ return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
+
+def create_annotation(d, comment):
+ from datetime import datetime, timezone
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ annotation = oe.spdx.SPDXAnnotation()
+ annotation.annotationDate = creation_time
+ annotation.annotationType = "OTHER"
+ annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
+ annotation.comment = comment
+ return annotation
+
+def recipe_spdx_is_native(d, recipe):
+ return any(a.annotationType == "OTHER" and
+ a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
+ a.comment == "isNative" for a in recipe.annotations)
+
+def is_work_shared_spdx(d):
+ return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+
+def get_json_indent(d):
+ if d.getVar("SPDX_PRETTY") == "1":
+ return 2
+ return None
+
+python() {
+ import json
+ if d.getVar("SPDX_LICENSE_DATA"):
+ return
+
+ with open(d.getVar("SPDX_LICENSES"), "r") as f:
+ data = json.load(f)
+ # Transform the license array to a dictionary
+ data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
+ d.setVar("SPDX_LICENSE_DATA", data)
+}
+
+def convert_license_to_spdx(lic, document, d, existing={}):
+ from pathlib import Path
+ import oe.spdx
+
+ license_data = d.getVar("SPDX_LICENSE_DATA")
+ extracted = {}
+
+ def add_extracted_license(ident, name):
+ nonlocal document
+
+ if name in extracted:
+ return
+
+ extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
+ extracted_info.name = name
+ extracted_info.licenseId = ident
+ extracted_info.extractedText = None
+
+ if name == "PD":
+ # Special-case this.
+ extracted_info.extractedText = "Software released to the public domain"
+ else:
+ # Search for the license in COMMON_LICENSE_DIR and LICENSE_PATH
+ for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
+ try:
+ with (Path(directory) / name).open(errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ break
+ except FileNotFoundError:
+ pass
+ if extracted_info.extractedText is None:
+ # If it's not SPDX or PD, then NO_GENERIC_LICENSE must be set
+ filename = d.getVarFlag('NO_GENERIC_LICENSE', name)
+ if filename:
+ filename = d.expand("${S}/" + filename)
+ with open(filename, errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ else:
+ bb.error("Cannot find any text for license %s" % name)
+
+ extracted[name] = extracted_info
+ document.hasExtractedLicensingInfos.append(extracted_info)
+
+ def convert(l):
+ if l == "(" or l == ")":
+ return l
+
+ if l == "&":
+ return "AND"
+
+ if l == "|":
+ return "OR"
+
+ if l == "CLOSED":
+ return "NONE"
+
+ spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
+ if spdx_license in license_data["licenses"]:
+ return spdx_license
+
+ try:
+ spdx_license = existing[l]
+ except KeyError:
+ spdx_license = "LicenseRef-" + l
+ add_extracted_license(spdx_license, l)
+
+ return spdx_license
+
+ lic_split = lic.replace("(", " ( ").replace(")", " ) ").split()
+
+ return ' '.join(convert(l) for l in lic_split)
+
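As an illustration (not part of the patch), the conversion above maps BitBake's
"&" and "|" license operators onto SPDX "AND" and "OR", treats "CLOSED" as
"NONE", and falls back to LicenseRef- identifiers for names it cannot resolve.
A self-contained sketch of just the operator mapping, with a hypothetical input
and no datastore involved:

    def to_spdx_expression(lic):
        # Same tokenization as convert_license_to_spdx(), operators only.
        mapping = {"&": "AND", "|": "OR", "CLOSED": "NONE"}
        tokens = lic.replace("(", " ( ").replace(")", " ) ").split()
        return " ".join(mapping.get(t, t) for t in tokens)

    print(to_spdx_expression("GPL-2.0-only & (MIT | BSD-3-Clause)"))
    # GPL-2.0-only AND ( MIT OR BSD-3-Clause )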
+def process_sources(d):
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES").split():
+ if p != pn:
+ pn = p
+ break
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return False
+ if d.getVar('PN') == "libtool-cross":
+ return False
+ if d.getVar('PN') == "libgcc-initial":
+ return False
+ if d.getVar('PN') == "shadow-sysroot":
+ return False
+
+ # We just archive gcc-source for all the gcc related recipes
+ if d.getVar('BPN') in ['gcc', 'libgcc']:
+ bb.debug(1, 'spdx: There is a bug in the scan of %s, do nothing' % pn)
+ return False
+
+ return True
+
+
+def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
+ from pathlib import Path
+ import oe.spdx
+ import hashlib
+
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date_epoch = int(source_date_epoch)
+
+ sha1s = []
+ spdx_files = []
+
+ file_counter = 1
+ for subdir, dirs, files in os.walk(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_dirs]
+ if subdir == str(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
+
+ for file in files:
+ filepath = Path(subdir) / file
+ filename = str(filepath.relative_to(topdir))
+
+ if not filepath.is_symlink() and filepath.is_file():
+ spdx_file = oe.spdx.SPDXFile()
+ spdx_file.SPDXID = get_spdxid(file_counter)
+ for t in get_types(filepath):
+ spdx_file.fileTypes.append(t)
+ spdx_file.fileName = filename
+
+ if archive is not None:
+ with filepath.open("rb") as f:
+ info = archive.gettarinfo(fileobj=f)
+ info.name = filename
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > source_date_epoch:
+ info.mtime = source_date_epoch
+
+ archive.addfile(info, f)
+
+ sha1 = bb.utils.sha1_file(filepath)
+ sha1s.append(sha1)
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA1",
+ checksumValue=sha1,
+ ))
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA256",
+ checksumValue=bb.utils.sha256_file(filepath),
+ ))
+
+ if "SOURCE" in spdx_file.fileTypes:
+ extracted_lics = extract_licenses(filepath)
+ if extracted_lics:
+ spdx_file.licenseInfoInFiles = extracted_lics
+
+ doc.files.append(spdx_file)
+ doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
+ spdx_pkg.hasFiles.append(spdx_file.SPDXID)
+
+ spdx_files.append(spdx_file)
+
+ file_counter += 1
+
+ sha1s.sort()
+ verifier = hashlib.sha1()
+ for v in sha1s:
+ verifier.update(v.encode("utf-8"))
+ spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
+
+ return spdx_files
+
+
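As an illustration (not part of the patch), the verification code computed above
sorts the per-file SHA-1 digests, concatenates them and hashes the result, which
is the usual SPDX package verification scheme. A tiny standalone sketch with a
made-up digest:

    import hashlib

    def verification_code(file_sha1s):
        verifier = hashlib.sha1()
        for v in sorted(file_sha1s):
            verifier.update(v.encode("utf-8"))
        return verifier.hexdigest()

    print(verification_code(["da39a3ee5e6b4b0d3255bfef95601890afd80709"]))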
+def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
+ from pathlib import Path
+ import hashlib
+ import oe.packagedata
+ import oe.spdx
+
+ debug_search_paths = [
+ Path(d.getVar('PKGD')),
+ Path(d.getVar('STAGING_DIR_TARGET')),
+ Path(d.getVar('STAGING_DIR_NATIVE')),
+ Path(d.getVar('STAGING_KERNEL_DIR')),
+ ]
+
+ pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
+
+ if pkg_data is None:
+ return
+
+ for file_path, file_data in pkg_data["files_info"].items():
+ if not "debugsrc" in file_data:
+ continue
+
+ for pkg_file in package_files:
+ if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
+ break
+ else:
+ bb.fatal("No package file found for %s" % str(file_path))
+ continue
+
+ for debugsrc in file_data["debugsrc"]:
+ ref_id = "NOASSERTION"
+ for search in debug_search_paths:
+ if debugsrc.startswith("/usr/src/kernel"):
+ debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
+ else:
+ debugsrc_path = search / debugsrc.lstrip("/")
+ if not debugsrc_path.exists():
+ continue
+
+ file_sha256 = bb.utils.sha256_file(debugsrc_path)
+
+ if file_sha256 in sources:
+ source_file = sources[file_sha256]
+
+ doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
+ if doc_ref is None:
+ doc_ref = oe.spdx.SPDXExternalDocumentRef()
+ doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
+ doc_ref.spdxDocument = source_file.doc.documentNamespace
+ doc_ref.checksum.algorithm = "SHA1"
+ doc_ref.checksum.checksumValue = source_file.doc_sha1
+ package_doc.externalDocumentRefs.append(doc_ref)
+
+ ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
+ else:
+ bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
+ break
+ else:
+ bb.debug(1, "Debug source %s not found" % debugsrc)
+
+ package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
+
+def collect_dep_recipes(d, doc, spdx_recipe):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ dep_recipes = []
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if
+ dep[1] == "do_create_spdx" and dep[0] != d.getVar("PN")
+ ))
+ for dep_pn in deps:
+ dep_recipe_path = deploy_dir_spdx / "recipes" / ("recipe-%s.spdx.json" % dep_pn)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pn:
+ spdx_dep_recipe = pkg
+ break
+ else:
+ continue
+
+ dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
+
+ dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
+ dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_recipe_ref.checksum.algorithm = "SHA1"
+ dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
+
+ doc.externalDocumentRefs.append(dep_recipe_ref)
+
+ doc.add_relationship(
+ "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
+ "BUILD_DEPENDENCY_OF",
+ spdx_recipe
+ )
+
+ return dep_recipes
+
+collect_dep_recipes[vardepsexclude] += "BB_TASKDEPDATA"
+collect_dep_recipes[vardeps] += "DEPENDS"
+
+def collect_dep_sources(d, dep_recipes):
+ import oe.sbom
+
+ sources = {}
+ for dep in dep_recipes:
+ # Don't collect sources from native recipes as they
+ # match non-native sources also.
+ if recipe_spdx_is_native(d, dep.recipe):
+ continue
+ recipe_files = set(dep.recipe.hasFiles)
+
+ for spdx_file in dep.doc.files:
+ if spdx_file.SPDXID not in recipe_files:
+ continue
+
+ if "SOURCE" in spdx_file.fileTypes:
+ for checksum in spdx_file.checksums:
+ if checksum.algorithm == "SHA256":
+ sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
+ break
+
+ return sources
+
+def add_download_packages(d, doc, recipe):
+ import os.path
+ from bb.fetch2 import decodeurl, CHECKSUM_LIST
+ import bb.process
+ import oe.spdx
+ import oe.sbom
+
+ for download_idx, src_uri in enumerate(d.getVar('SRC_URI').split()):
+ f = bb.fetch2.FetchData(src_uri, d)
+
+ for name in f.names:
+ package = oe.spdx.SPDXPackage()
+ package.name = "%s-source-%d" % (d.getVar("PN"), download_idx + 1)
+ package.SPDXID = oe.sbom.get_download_spdxid(d, download_idx + 1)
+
+ if f.type == "file":
+ continue
+
+ uri = f.type
+ proto = getattr(f, "proto", None)
+ if proto is not None:
+ uri = uri + "+" + proto
+ uri = uri + "://" + f.host + f.path
+
+ if f.method.supports_srcrev():
+ uri = uri + "@" + f.revisions[name]
+
+ if f.method.supports_checksum(f):
+ for checksum_id in CHECKSUM_LIST:
+ if checksum_id.upper() not in oe.spdx.SPDXPackage.ALLOWED_CHECKSUMS:
+ continue
+
+ expected_checksum = getattr(f, "%s_expected" % checksum_id)
+ if expected_checksum is None:
+ continue
+
+ c = oe.spdx.SPDXChecksum()
+ c.algorithm = checksum_id.upper()
+ c.checksumValue = expected_checksum
+ package.checksums.append(c)
+
+ package.downloadLocation = uri
+ doc.packages.append(package)
+ doc.add_relationship(doc, "DESCRIBES", package)
+ # In the future, we might be able to do more fancy dependencies,
+ # but this should be sufficient for now
+ doc.add_relationship(package, "BUILD_DEPENDENCY_OF", recipe)
+
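As an illustration (not part of the patch), the download location assembled above
takes the form "scheme[+proto]://host/path[@revision]". A standalone sketch of
that composition with made-up values (the real inputs come from BitBake's fetch
data):

    def download_location(scheme, host, path, proto=None, rev=None):
        uri = scheme + ("+" + proto if proto else "") + "://" + host + path
        if rev:
            uri += "@" + rev
        return uri

    print(download_location("git", "git.example.com", "/repo.git", proto="https", rev="abc123"))
    # git+https://git.example.com/repo.git@abc123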
+python do_create_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import uuid
+ from pathlib import Path
+ from contextlib import contextmanager
+ import oe.cve_check
+
+ @contextmanager
+ def optional_tarfile(name, guard, mode="w"):
+ import tarfile
+ import gzip
+
+ if guard:
+ name.parent.mkdir(parents=True, exist_ok=True)
+ with gzip.open(name, mode=mode + "b") as f:
+ with tarfile.open(fileobj=f, mode=mode + "|") as tf:
+ yield tf
+ else:
+ yield None
+
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_workdir = Path(d.getVar("SPDXWORK"))
+ include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
+ archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
+ archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ doc = oe.spdx.SPDXDocument()
+
+ doc.name = "recipe-" + d.getVar("PN")
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ recipe = oe.spdx.SPDXPackage()
+ recipe.name = d.getVar("PN")
+ recipe.versionInfo = d.getVar("PV")
+ recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
+ recipe.supplier = d.getVar("SPDX_SUPPLIER")
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+ recipe.annotations.append(create_annotation(d, "isNative"))
+
+ homepage = d.getVar("HOMEPAGE")
+ if homepage:
+ recipe.homepage = homepage
+
+ license = d.getVar("LICENSE")
+ if license:
+ recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
+
+ summary = d.getVar("SUMMARY")
+ if summary:
+ recipe.summary = summary
+
+ description = d.getVar("DESCRIPTION")
+ if description:
+ recipe.description = description
+
+ if d.getVar("SPDX_CUSTOM_ANNOTATION_VARS"):
+ for var in d.getVar('SPDX_CUSTOM_ANNOTATION_VARS').split():
+ recipe.annotations.append(create_annotation(d, var + "=" + d.getVar(var)))
+
+ # Some CVEs may be patched during the build process without incrementing the version number,
+ # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
+ # save the CVEs fixed by patches to source information field in the SPDX.
+ patched_cves = oe.cve_check.get_patched_cves(d)
+ patched_cves = list(patched_cves)
+ patched_cves = ' '.join(patched_cves)
+ if patched_cves:
+ recipe.sourceInfo = "CVEs fixed: " + patched_cves
+
+ cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
+ if cpe_ids:
+ for cpe_id in cpe_ids:
+ cpe = oe.spdx.SPDXExternalReference()
+ cpe.referenceCategory = "SECURITY"
+ cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
+ cpe.referenceLocator = cpe_id
+ recipe.externalRefs.append(cpe)
+
+ doc.packages.append(recipe)
+ doc.add_relationship(doc, "DESCRIBES", recipe)
+
+ add_download_packages(d, doc, recipe)
+
+ if process_sources(d) and include_sources:
+ recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.gz")
+ with optional_tarfile(recipe_archive, archive_sources) as archive:
+ spdx_get_src(d)
+
+ add_package_files(
+ d,
+ doc,
+ recipe,
+ spdx_workdir,
+ lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
+ lambda filepath: ["SOURCE"],
+ ignore_dirs=[".git"],
+ ignore_top_level_dirs=["temp"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ recipe.packageFileName = str(recipe_archive.name)
+
+ dep_recipes = collect_dep_recipes(d, doc, recipe)
+
+ doc_sha1 = oe.sbom.write_doc(d, doc, "recipes", indent=get_json_indent(d))
+ dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
+
+ recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
+ recipe_ref.spdxDocument = doc.documentNamespace
+ recipe_ref.checksum.algorithm = "SHA1"
+ recipe_ref.checksum.checksumValue = doc_sha1
+
+ sources = collect_dep_sources(d, dep_recipes)
+ found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
+
+ if not recipe_spdx_is_native(d, recipe):
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ if not oe.packagedata.packaged(package, d):
+ continue
+
+ package_doc = oe.spdx.SPDXDocument()
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ package_doc.name = pkg_name
+ package_doc.documentNamespace = get_doc_namespace(d, package_doc)
+ package_doc.creationInfo.created = creation_time
+ package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
+ package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ package_doc.creationInfo.creators.append("Person: N/A ()")
+ package_doc.externalDocumentRefs.append(recipe_ref)
+
+ package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
+
+ spdx_package = oe.spdx.SPDXPackage()
+
+ spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
+ spdx_package.name = pkg_name
+ spdx_package.versionInfo = d.getVar("PV")
+ spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
+ spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
+
+ package_doc.packages.append(spdx_package)
+
+ package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
+ package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
+
+ package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.gz")
+ with optional_tarfile(package_archive, archive_packaged) as archive:
+ package_files = add_package_files(
+ d,
+ package_doc,
+ spdx_package,
+ pkgdest / package,
+ lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
+ lambda filepath: ["BINARY"],
+ ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
+ archive=archive,
+ )
+
+ if archive is not None:
+ spdx_package.packageFileName = str(package_archive.name)
+
+ add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
+
+ oe.sbom.write_doc(d, package_doc, "packages", indent=get_json_indent(d))
+}
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
+addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
+
+SSTATETASKS += "do_create_spdx"
+do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
+do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_spdx_setscene
+
+do_create_spdx[dirs] = "${SPDXWORK}"
+do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
+do_create_spdx[depends] += "${PATCHDEPENDENCY}"
+do_create_spdx[deptask] = "do_create_spdx"
+
+def collect_package_providers(d):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+ import json
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ providers = {}
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if dep[0] != d.getVar("PN")
+ ))
+ deps.append(d.getVar("PN"))
+
+ for dep_pn in deps:
+ recipe_data = oe.packagedata.read_pkgdata(dep_pn, d)
+
+ for pkg in recipe_data.get("PACKAGES", "").split():
+
+ pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, d)
+ rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
+ rprovides.add(pkg)
+
+ for r in rprovides:
+ providers[r] = pkg
+
+ return providers
+
+collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
+
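As an illustration (not part of the patch), collect_package_providers() flattens
each package's RPROVIDES into a lookup table so that runtime dependency names
resolve to the concrete providing package. A standalone sketch with assumed,
illustrative package data:

    # Illustrative data; the real values are read from pkgdata on disk.
    pkg_rprovides = {
        "dropbear": {"ssh", "sshd"},
        "busybox": {"sh"},
    }

    providers = {}
    for pkg, rprovides in pkg_rprovides.items():
        for name in rprovides | {pkg}:
            providers[name] = pkg

    print(providers["ssh"], providers["busybox"])  # dropbear busybox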
+python do_create_runtime_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import oe.packagedata
+ from pathlib import Path
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
+ is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ providers = collect_package_providers(d)
+
+ if not is_native:
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ dep_package_cache = {}
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ localdata = bb.data.createCopy(d)
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ localdata.setVar("PKG", pkg_name)
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
+
+ if not oe.packagedata.packaged(package, localdata):
+ continue
+
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (pkg_name + ".spdx.json")
+
+ package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in package_doc.packages:
+ if p.name == pkg_name:
+ spdx_package = p
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
+
+ runtime_doc = oe.spdx.SPDXDocument()
+ runtime_doc.name = "runtime-" + pkg_name
+ runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
+ runtime_doc.creationInfo.created = creation_time
+ runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
+ runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ runtime_doc.creationInfo.creators.append("Person: N/A ()")
+
+ package_ref = oe.spdx.SPDXExternalDocumentRef()
+ package_ref.externalDocumentId = "DocumentRef-package-" + package
+ package_ref.spdxDocument = package_doc.documentNamespace
+ package_ref.checksum.algorithm = "SHA1"
+ package_ref.checksum.checksumValue = package_doc_sha1
+
+ runtime_doc.externalDocumentRefs.append(package_ref)
+
+ runtime_doc.add_relationship(
+ runtime_doc.SPDXID,
+ "AMENDS",
+ "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
+ )
+
+ deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
+ seen_deps = set()
+ for dep, _ in deps.items():
+ if dep in seen_deps:
+ continue
+
+ if dep not in providers:
+ continue
+
+ dep = providers[dep]
+
+ if not oe.packagedata.packaged(dep, localdata):
+ continue
+
+ dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
+ dep_pkg = dep_pkg_data["PKG"]
+
+ if dep in dep_package_cache:
+ (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
+ else:
+ dep_path = deploy_dir_spdx / "packages" / ("%s.spdx.json" % dep_pkg)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pkg:
+ dep_spdx_package = pkg
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
+
+ dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
+ dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_package_ref.checksum.algorithm = "SHA1"
+ dep_package_ref.checksum.checksumValue = spdx_dep_sha1
+
+ dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
+
+ runtime_doc.externalDocumentRefs.append(dep_package_ref)
+
+ runtime_doc.add_relationship(
+ "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
+ "RUNTIME_DEPENDENCY_OF",
+ "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
+ )
+ seen_deps.add(dep)
+
+ oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy, indent=get_json_indent(d))
+}
+
+addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
+SSTATETASKS += "do_create_runtime_spdx"
+do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_runtime_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_runtime_spdx_setscene
+
+do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[rdeptask] = "do_create_spdx"
+
+def spdx_get_src(d):
+ """
+ Save the patched source of the recipe into the SPDXWORK directory.
+ """
+ import shutil
+ spdx_workdir = d.getVar('SPDXWORK')
+ spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
+
+ workdir = d.getVar("WORKDIR")
+
+ try:
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+ if not is_work_shared_spdx(d):
+ # Change the WORKDIR to make do_unpack do_patch run in another dir.
+ d.setVar('WORKDIR', spdx_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+
+ # Changing 'WORKDIR' also changes 'B'; create the 'B' directory because
+ # some of the following tasks (such as some recipes' do_patch) require
+ # it to exist.
+ bb.utils.mkdirhier(d.getVar('B'))
+
+ bb.build.exec_func('do_unpack', d)
+ # Copy source of kernel to spdx_workdir
+ if is_work_shared_spdx(d):
+ share_src = d.getVar('WORKDIR')
+ d.setVar('WORKDIR', spdx_workdir)
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+ src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
+ bb.utils.mkdirhier(src_dir)
+ if bb.data.inherits_class('kernel',d):
+ share_src = d.getVar('STAGING_KERNEL_DIR')
+ cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
+ cmd_copy_shared_res = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_shared_result = " + cmd_copy_shared_res)
+
+ git_path = src_dir + "/.git"
+ if os.path.exists(git_path):
+ shutil.rmtree(git_path)
+
+ # Make sure gcc and kernel sources are patched only once
+ if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
+ bb.build.exec_func('do_patch', d)
+
+ # Some userland recipes have no source.
+ if not os.path.exists( spdx_workdir ):
+ bb.utils.mkdirhier(spdx_workdir)
+ finally:
+ d.setVar("WORKDIR", workdir)
+
+do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_rootfs[cleandirs] += "${SPDXIMAGEWORK}"
+
+ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx ; "
+
+do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_populate_sdk[cleandirs] += "${SPDXSDKWORK}"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx; "
+
+python image_combine_spdx() {
+ import os
+ import oe.sbom
+ from pathlib import Path
+ from oe.rootfs import image_list_installed_packages
+
+ image_name = d.getVar("IMAGE_NAME")
+ image_link_name = d.getVar("IMAGE_LINK_NAME")
+ imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
+ img_spdxid = oe.sbom.get_image_spdxid(image_name)
+ packages = image_list_installed_packages(d)
+
+ combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages, Path(d.getVar("SPDXIMAGEWORK")))
+
+ def make_image_link(target_path, suffix):
+ if image_link_name:
+ link = imgdeploydir / (image_link_name + suffix)
+ if link != target_path:
+ link.symlink_to(os.path.relpath(target_path, link.parent))
+
+ spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.gz")
+ make_image_link(spdx_tar_path, ".spdx.tar.gz")
+}
+
+python sdk_host_combine_spdx() {
+ sdk_combine_spdx(d, "host")
+}
+
+python sdk_target_combine_spdx() {
+ sdk_combine_spdx(d, "target")
+}
+
+def sdk_combine_spdx(d, sdk_type):
+ import oe.sbom
+ from pathlib import Path
+ from oe.sdk import sdk_list_installed_packages
+
+ sdk_name = d.getVar("SDK_NAME") + "-" + sdk_type
+ sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
+ sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
+ sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
+ combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages, Path(d.getVar('SPDXSDKWORK')))
+
+def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages, spdx_workdir):
+ import os
+ import oe.spdx
+ import oe.sbom
+ import io
+ import json
+ from datetime import timezone, datetime
+ from pathlib import Path
+ import tarfile
+ import gzip
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+
+ doc = oe.spdx.SPDXDocument()
+ doc.name = rootfs_name
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ image = oe.spdx.SPDXPackage()
+ image.name = d.getVar("PN")
+ image.versionInfo = d.getVar("PV")
+ image.SPDXID = rootfs_spdxid
+ image.supplier = d.getVar("SPDX_SUPPLIER")
+
+ doc.packages.append(image)
+
+ for name in sorted(packages.keys()):
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (name + ".spdx.json")
+ pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in pkg_doc.packages:
+ if p.name == name:
+ pkg_ref = oe.spdx.SPDXExternalDocumentRef()
+ pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
+ pkg_ref.spdxDocument = pkg_doc.documentNamespace
+ pkg_ref.checksum.algorithm = "SHA1"
+ pkg_ref.checksum.checksumValue = pkg_doc_sha1
+
+ doc.externalDocumentRefs.append(pkg_ref)
+ doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
+ break
+ else:
+ bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
+
+ runtime_spdx_path = deploy_dir_spdx / "runtime" / ("runtime-" + name + ".spdx.json")
+ runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
+
+ runtime_ref = oe.spdx.SPDXExternalDocumentRef()
+ runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
+ runtime_ref.spdxDocument = runtime_doc.documentNamespace
+ runtime_ref.checksum.algorithm = "SHA1"
+ runtime_ref.checksum.checksumValue = runtime_doc_sha1
+
+ # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
+ doc.externalDocumentRefs.append(runtime_ref)
+ doc.add_relationship(
+ image,
+ "OTHER",
+ "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
+ comment="Runtime dependencies for %s" % name
+ )
+
+ image_spdx_path = spdx_workdir / (rootfs_name + ".spdx.json")
+
+ with image_spdx_path.open("wb") as f:
+ doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ visited_docs = set()
+
+ index = {"documents": []}
+
+ spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.gz")
+ with gzip.open(spdx_tar_path, "w") as f:
+ with tarfile.open(fileobj=f, mode="w|") as tar:
+ def collect_spdx_document(path):
+ nonlocal tar
+ nonlocal deploy_dir_spdx
+ nonlocal source_date_epoch
+ nonlocal index
+
+ if path in visited_docs:
+ return
+
+ visited_docs.add(path)
+
+ with path.open("rb") as f:
+ doc, sha1 = oe.sbom.read_doc(f)
+ f.seek(0)
+
+ if doc.documentNamespace in visited_docs:
+ return
+
+ bb.note("Adding SPDX document %s" % path)
+ visited_docs.add(doc.documentNamespace)
+ info = tar.gettarinfo(fileobj=f)
+
+ info.name = doc.name + ".spdx.json"
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > int(source_date_epoch):
+ info.mtime = int(source_date_epoch)
+
+ tar.addfile(info, f)
+
+ index["documents"].append({
+ "filename": info.name,
+ "documentNamespace": doc.documentNamespace,
+ "sha1": sha1,
+ })
+
+ for ref in doc.externalDocumentRefs:
+ ref_path = deploy_dir_spdx / "by-namespace" / ref.spdxDocument.replace("/", "_")
+ collect_spdx_document(ref_path)
+
+ collect_spdx_document(image_spdx_path)
+
+ index["documents"].sort(key=lambda x: x["filename"])
+
+ index_str = io.BytesIO(json.dumps(
+ index,
+ sort_keys=True,
+ indent=get_json_indent(d),
+ ).encode("utf-8"))
+
+ info = tarfile.TarInfo()
+ info.name = "index.json"
+ info.size = len(index_str.getvalue())
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ tar.addfile(info, fileobj=index_str)
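As an illustration (not part of the patch), the archive written above ends with an
index.json entry describing every SPDX document it contains. A minimal sketch of
that index layout with made-up values (field names taken from the code above):

    import json

    index = {"documents": [{
        "filename": "recipe-example.spdx.json",
        "documentNamespace": "http://spdx.org/spdxdoc/recipe-example-0000",
        "sha1": "da39a3ee5e6b4b0d3255bfef95601890afd80709",
    }]}
    index["documents"].sort(key=lambda x: x["filename"])
    print(json.dumps(index, sort_keys=True, indent=2))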
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
new file mode 100644
index 0000000000..19c6c0ff0b
--- /dev/null
+++ b/meta/classes/create-spdx.bbclass
@@ -0,0 +1,8 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Include this class when you don't care what version of SPDX you get; it will
+# be updated to the latest stable version that is supported
+inherit create-spdx-2.2
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index ac9f0fb22c..5e6bae1757 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -26,7 +26,7 @@ CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_2.db"
CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
@@ -42,19 +42,25 @@ CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
-CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
-CVE_CHECK_MANIFEST_JSON ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json"
+CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
+# Report Patched or Ignored/Whitelisted CVEs
CVE_CHECK_REPORT_PATCHED ??= "1"
+CVE_CHECK_SHOW_WARNINGS ??= "1"
+
# Provide text output
CVE_CHECK_FORMAT_TEXT ??= "1"
# Provide JSON output - disabled by default for backward compatibility
CVE_CHECK_FORMAT_JSON ??= "0"
+# Check for packages without CVEs (no issues or missing product name)
+CVE_CHECK_COVERAGE ??= "1"
+
# Whitelist for packages (PN)
CVE_CHECK_PN_WHITELIST ?= ""
@@ -75,10 +81,10 @@ CVE_CHECK_LAYER_INCLUDELIST ??= ""
# set to "alphabetical" for version using single alphabetical character as increment release
CVE_VERSION_SUFFIX ??= ""
-def generate_json_report(out_path, link_path):
+def generate_json_report(d, out_path, link_path):
if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
import json
- from oe.cve_check import cve_check_merge_jsons
+ from oe.cve_check import cve_check_merge_jsons, update_symlinks
bb.note("Generating JSON CVE summary")
index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
@@ -91,17 +97,17 @@ def generate_json_report(out_path, link_path):
cve_check_merge_jsons(summary, data)
filename = f.readline()
+ summary["package"].sort(key=lambda d: d['name'])
+
with open(out_path, "w") as f:
json.dump(summary, f, indent=2)
- if link_path != out_path:
- if os.path.exists(os.path.realpath(link_path)):
- os.remove(link_path)
- os.symlink(os.path.basename(out_path), link_path)
+ update_symlinks(out_path, link_path)
python cve_save_summary_handler () {
import shutil
import datetime
+ from oe.cve_check import update_symlinks
cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
@@ -114,19 +120,15 @@ python cve_save_summary_handler () {
if os.path.exists(cve_tmp_file):
shutil.copyfile(cve_tmp_file, cve_summary_file)
+ cvefile_link = os.path.join(cvelogpath, cve_summary_name)
+ update_symlinks(cve_summary_file, cvefile_link)
+ bb.plain("Complete CVE report summary created at: %s" % cvefile_link)
- if cve_summary_file and os.path.exists(cve_summary_file):
- cvefile_link = os.path.join(cvelogpath, cve_summary_name)
- # if the paths are the same don't create the link
- if cvefile_link != cve_summary_file:
- if os.path.exists(os.path.realpath(cvefile_link)):
- os.remove(cvefile_link)
- os.symlink(os.path.basename(cve_summary_file), cvefile_link)
-
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp))
- generate_json_report(json_summary_name, json_summary_link_name)
- bb.plain("CVE report summary created at: %s" % json_summary_link_name)
+ generate_json_report(d, json_summary_name, json_summary_link_name)
+ bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
}
addhandler cve_save_summary_handler
@@ -136,23 +138,25 @@ python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
+ from oe.cve_check import get_patched_cves
- if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
- try:
- patched_cves = get_patches_cves(d)
- except FileNotFoundError:
- bb.fatal("Failure in searching patches")
- whitelisted, patched, unpatched = check_cves(d, patched_cves)
- if patched or unpatched:
- cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, whitelisted, cve_data)
- else:
- bb.note("No CVE database found, skipping CVE check")
+ with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
+ if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
+ try:
+ patched_cves = get_patched_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ ignored, patched, unpatched, status = check_cves(d, patched_cves)
+ if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
+ cve_data = get_cve_info(d, patched + unpatched + ignored)
+ cve_write_data(d, patched, unpatched, ignored, cve_data, status)
+ else:
+ bb.note("No CVE database found, skipping CVE check")
}
addtask cve_check before do_build
-do_cve_check[depends] = "cve-update-db-native:do_fetch"
+do_cve_check[depends] = "cve-update-nvd2-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -164,7 +168,7 @@ python cve_check_cleanup () {
}
addhandler cve_check_cleanup
-cve_check_cleanup[eventmask] = "bb.cooker.CookerExit"
+cve_check_cleanup[eventmask] = "bb.event.BuildCompleted"
python cve_check_write_rootfs_manifest () {
"""
@@ -172,7 +176,9 @@ python cve_check_write_rootfs_manifest () {
"""
import shutil
- from oe.cve_check import cve_check_merge_jsons
+ import json
+ from oe.rootfs import image_list_installed_packages
+ from oe.cve_check import cve_check_merge_jsons, update_symlinks
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
@@ -182,117 +188,94 @@ python cve_check_write_rootfs_manifest () {
if os.path.exists(deploy_file_json):
bb.utils.remove(deploy_file_json)
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
- bb.note("Writing rootfs CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
- link_name = d.getVar("IMAGE_LINK_NAME")
+ # Create a list of relevant recipes
+ recipies = set()
+ for pkg in list(image_list_installed_packages(d)):
+ pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
+ 'runtime-reverse', pkg)
+ pkg_data = oe.packagedata.read_pkgdatafile(pkg_info)
+ recipies.add(pkg_data["PN"])
+
+ bb.note("Writing rootfs CVE manifest")
+ deploy_dir = d.getVar("IMGDEPLOYDIR")
+ link_name = d.getVar("IMAGE_LINK_NAME")
+
+ json_data = {"version":"1", "package": []}
+ text_data = ""
+ enable_json = d.getVar("CVE_CHECK_FORMAT_JSON") == "1"
+ enable_text = d.getVar("CVE_CHECK_FORMAT_TEXT") == "1"
+
+ save_pn = d.getVar("PN")
+
+ for pkg in recipies:
+ # To be able to use the CVE_CHECK_RECIPE_FILE variable we have to evaluate
+ # it with the different PN names set each time.
+ d.setVar("PN", pkg)
+ if enable_text:
+ pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE")
+ if os.path.exists(pkgfilepath):
+ with open(pkgfilepath) as pfile:
+ text_data += pfile.read()
+
+ if enable_json:
+ pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ if os.path.exists(pkgfilepath):
+ with open(pkgfilepath) as j:
+ data = json.load(j)
+ cve_check_merge_jsons(json_data, data)
+
+ d.setVar("PN", save_pn)
+
+ if enable_text:
+ link_path = os.path.join(deploy_dir, "%s.cve" % link_name)
manifest_name = d.getVar("CVE_CHECK_MANIFEST")
- cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
- bb.utils.mkdirhier(os.path.dirname(manifest_name))
- shutil.copyfile(cve_tmp_file, manifest_name)
+ with open(manifest_name, "w") as f:
+ f.write(text_data)
- if manifest_name and os.path.exists(manifest_name):
- manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name)
- # if they are the same don't create the link
- if manifest_link != manifest_name:
- # If we already have another manifest, update symlinks
- if os.path.exists(os.path.realpath(manifest_link)):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
- bb.plain("Image CVE report stored in: %s" % manifest_name)
+ update_symlinks(manifest_name, link_path)
+ bb.plain("Image CVE report stored in: %s" % manifest_name)
+ if enable_json:
link_path = os.path.join(deploy_dir, "%s.json" % link_name)
- manifest_path = d.getVar("CVE_CHECK_MANIFEST_JSON")
- bb.note("Generating JSON CVE manifest")
- generate_json_report(json_summary_name, json_summary_link_name)
- bb.plain("Image CVE JSON report stored in: %s" % link_path)
+ manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
+
+ with open(manifest_name, "w") as f:
+ json.dump(json_data, f, indent=2)
+
+ update_symlinks(manifest_name, link_path)
+ bb.plain("Image CVE JSON report stored in: %s" % manifest_name)
}
ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
-
-def get_patches_cves(d):
- """
- Get patches that solve CVEs using the "CVE: " tag.
- """
-
- import re
-
- pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
-
- # Matches the last "CVE-YYYY-ID" in the file name, also if written
- # in lowercase. Possible to have multiple CVE IDs in a single
- # file name, but only the last one will be detected from the file name.
- # However, patch files contents addressing multiple CVE IDs are supported
- # (cve_match regular expression)
-
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
-
- patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in src_patches(d):
- patch_file = bb.fetch.decodeurl(url)[2]
-
- if not os.path.isfile(patch_file):
- bb.error("File Not found: %s" % patch_file)
- raise FileNotFoundError
-
- # Check patch file name for CVE ID
- fname_match = cve_file_name_match.search(patch_file)
- if fname_match:
- cve = fname_match.group(1).upper()
- patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
-
- with open(patch_file, "r", encoding="utf-8") as f:
- try:
- patch_text = f.read()
- except UnicodeDecodeError:
- bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
- " trying with iso8859-1" % patch_file)
- f.close()
- with open(patch_file, "r", encoding="iso8859-1") as f:
- patch_text = f.read()
-
- # Search for one or more "CVE: " lines
- text_match = False
- for match in cve_match.finditer(patch_text):
- # Get only the CVEs without the "CVE: " tag
- cves = patch_text[match.start()+5:match.end()]
- for cve in cves.split():
- bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
- patched_cves.add(cve)
- text_match = True
-
- if not fname_match and not text_match:
- bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
-
- return patched_cves
+do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- from oe.cve_check import Version
+ from oe.cve_check import Version, convert_cve_version
pn = d.getVar("PN")
real_pv = d.getVar("PV")
suffix = d.getVar("CVE_VERSION_SUFFIX")
cves_unpatched = []
+ cves_ignored = []
+ cves_status = []
+ cves_in_recipe = False
# CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
products = d.getVar("CVE_PRODUCT").split()
# If this has been unset then we're not scanning for CVEs here (for example, image recipes)
if not products:
- return ([], [], [])
+ return ([], [], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
# If the recipe has been whitelisted we return empty lists
if pn in d.getVar("CVE_CHECK_PN_WHITELIST").split():
bb.note("Recipe has been whitelisted, skipping check")
- return ([], [], [])
+ return ([], [], [], [])
cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
@@ -302,28 +285,42 @@ def check_cves(d, patched_cves):
# For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
for product in products:
+ cves_in_product = False
if ":" in product:
vendor, product = product.split(":", 1)
else:
vendor = "%"
# Find all relevant CVE IDs.
- for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
+ for cverow in cve_cursor:
cve = cverow[0]
if cve in cve_whitelist:
bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
- # TODO: this should be in the report as 'whitelisted'
- patched_cves.add(cve)
+ cves_ignored.append(cve)
continue
elif cve in patched_cves:
bb.note("%s has been patched" % (cve))
continue
+ # Write status once only for each product
+ if not cves_in_product:
+ cves_status.append([product, True])
+ cves_in_product = True
+ cves_in_recipe = True
vulnerable = False
- for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ ignored = False
+
+ product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
+ for row in product_cursor:
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
+ if cve in cve_whitelist:
+ ignored = True
+
+ version_start = convert_cve_version(version_start)
+ version_end = convert_cve_version(version_end)
if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
@@ -356,18 +353,27 @@ def check_cves(d, patched_cves):
vulnerable = vulnerable_start or vulnerable_end
if vulnerable:
- bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
- cves_unpatched.append(cve)
+ if ignored:
+ bb.note("%s is ignored in %s-%s" % (cve, pn, real_pv))
+ cves_ignored.append(cve)
+ else:
+ bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
+ cves_unpatched.append(cve)
break
+ product_cursor.close()
if not vulnerable:
bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
- # TODO: not patched but not vulnerable
patched_cves.add(cve)
+ cve_cursor.close()
+
+ if not cves_in_product:
+ bb.note("No CVE records found for product %s, pn %s" % (product, pn))
+ cves_status.append([product, False])
conn.close()
- return (list(cve_whitelist), list(patched_cves), cves_unpatched)
+ return (list(cves_ignored), list(patched_cves), cves_unpatched, cves_status)
def get_cve_info(d, cves):
"""
@@ -381,14 +387,15 @@ def get_cve_info(d, cves):
conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
- for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
+ for row in cursor:
cve_data[row[0]] = {}
cve_data[row[0]]["summary"] = row[1]
cve_data[row[0]]["scorev2"] = row[2]
cve_data[row[0]]["scorev3"] = row[3]
cve_data[row[0]]["modified"] = row[4]
cve_data[row[0]]["vector"] = row[5]
-
+ cursor.close()
conn.close()
return cve_data
@@ -398,7 +405,6 @@ def cve_write_data_text(d, patched, unpatched, whitelisted, cve_data):
CVE manifest if enabled.
"""
-
cve_file = d.getVar("CVE_CHECK_LOG")
fdir_name = d.getVar("FILE_DIRNAME")
layer = fdir_name.split("/")[-3]
@@ -406,12 +412,18 @@ def cve_write_data_text(d, patched, unpatched, whitelisted, cve_data):
include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+ report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
+
if exclude_layers and layer in exclude_layers:
return
if include_layers and layer not in include_layers:
return
+ # Early exit: the text format does not report packages without CVEs
+ if not patched+unpatched+whitelisted:
+ return
+
nvd_link = "https://nvd.nist.gov/vuln/detail/"
write_string = ""
unpatched_cves = []
@@ -419,13 +431,16 @@ def cve_write_data_text(d, patched, unpatched, whitelisted, cve_data):
for cve in sorted(cve_data):
is_patched = cve in patched
- if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"):
+ is_ignored = cve in whitelisted
+
+ if (is_patched or is_ignored) and not report_all:
continue
+
write_string += "LAYER: %s\n" % layer
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in whitelisted:
+ if is_ignored:
write_string += "CVE STATUS: Whitelisted\n"
elif is_patched:
write_string += "CVE STATUS: Patched\n"
@@ -438,26 +453,25 @@ def cve_write_data_text(d, patched, unpatched, whitelisted, cve_data):
write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
- if unpatched_cves:
+ if unpatched_cves and d.getVar("CVE_CHECK_SHOW_WARNINGS") == "1":
bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
- if write_string:
- with open(cve_file, "w") as f:
- bb.note("Writing file %s with CVE information" % cve_file)
- f.write(write_string)
+ with open(cve_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % cve_file)
+ f.write(write_string)
- if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
- bb.utils.mkdirhier(os.path.dirname(deploy_file))
- with open(deploy_file, "w") as f:
- f.write(write_string)
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
- if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
- cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
- bb.utils.mkdirhier(cvelogpath)
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
- with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
- f.write("%s" % write_string)
+ with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
+ f.write("%s" % write_string)
def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file):
"""
@@ -489,7 +503,7 @@ def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_fi
with open(index_path, "a+") as f:
f.write("%s\n" % fragment_path)
-def cve_write_data_json(d, patched, unpatched, ignored, cve_data):
+def cve_write_data_json(d, patched, unpatched, ignored, cve_data, cve_status):
"""
Prepare CVE data for the JSON format, then write it.
"""
@@ -503,6 +517,8 @@ def cve_write_data_json(d, patched, unpatched, ignored, cve_data):
include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+ report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
+
if exclude_layers and layer in exclude_layers:
return
@@ -511,20 +527,29 @@ def cve_write_data_json(d, patched, unpatched, ignored, cve_data):
unpatched_cves = []
+ product_data = []
+ for s in cve_status:
+ p = {"product": s[0], "cvesInRecord": "Yes"}
+ if s[1] == False:
+ p["cvesInRecord"] = "No"
+ product_data.append(p)
+
package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV"))
package_data = {
"name" : d.getVar("PN"),
"layer" : layer,
- "version" : package_version
+ "version" : package_version,
+ "products": product_data
}
cve_list = []
for cve in sorted(cve_data):
is_patched = cve in patched
+ is_ignored = cve in ignored
status = "Unpatched"
- if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"):
+ if (is_patched or is_ignored) and not report_all:
continue
- if cve in ignored:
+ if is_ignored:
status = "Ignored"
elif is_patched:
status = "Patched"
@@ -554,7 +579,7 @@ def cve_write_data_json(d, patched, unpatched, ignored, cve_data):
cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file)
-def cve_write_data(d, patched, unpatched, ignored, cve_data):
+def cve_write_data(d, patched, unpatched, ignored, cve_data, status):
"""
Write CVE data in each enabled format.
"""
@@ -562,4 +587,4 @@ def cve_write_data(d, patched, unpatched, ignored, cve_data):
if d.getVar("CVE_CHECK_FORMAT_TEXT") == "1":
cve_write_data_text(d, patched, unpatched, ignored, cve_data)
if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
- cve_write_data_json(d, patched, unpatched, ignored, cve_data)
+ cve_write_data_json(d, patched, unpatched, ignored, cve_data, status)
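For illustration only (not part of the patch): with these changes each package record in the JSON report gains a "products" list flagging whether the NVD data contained any record for that product, and patched or ignored CVEs are only listed when CVE_CHECK_REPORT_PATCHED is "1". A small sketch of those two pieces, with invented values:

    def build_package_record(pn, layer, version, cve_status):
        # cve_status is a list of [product, found] pairs as returned by check_cves().
        products = [{"product": product, "cvesInRecord": "Yes" if found else "No"}
                    for product, found in cve_status]
        return {"name": pn, "layer": layer, "version": version,
                "products": products, "issue": []}

    def should_report(cve, patched, ignored, report_all):
        # Unpatched CVEs are always reported; patched/ignored ones only with report_all.
        return report_all or (cve not in patched and cve not in ignored)

    record = build_package_record("curl", "meta", "7.69.1",
                                  [["curl", True], ["libcurl", True]])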
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index b6212ebd89..76dd0b42ee 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -2,8 +2,6 @@ inherit terminal
DEVSHELL = "${SHELL}"
-PATH:prepend:task-devshell = "${COREBASE}/scripts/git-intercept:"
-
python do_devshell () {
if d.getVarFlag("do_devshell", "manualfakeroot"):
d.prependVar("DEVSHELL", "pseudo ")
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index 0e0a3ae89c..9c9451e528 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -60,7 +60,7 @@ python () {
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
- d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
+ d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
local_srcuri = []
fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
@@ -207,8 +207,8 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
- top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
- stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(d.getVar("TOPDIR"),
+ subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
if git_dir == top_git_dir:
git_dir = None
except subprocess.CalledProcessError:
@@ -225,15 +225,16 @@ def srctree_hash_files(d, srcdir=None):
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
- submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
- for line in submodule_helper.splitlines():
- module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
- if os.path.isdir(module_dir):
- proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
- proc.communicate()
- proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
- stdout, _ = proc.communicate()
- git_sha1 += stdout.decode("utf-8")
+ if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
+ submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ if os.path.isdir(module_dir):
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
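For illustration only (not part of the patch): instead of 'git submodule--helper list' (which is not available in all Git versions), the hash function now reads .gitmodules with 'git config --get-regexp'. Each output line has the form 'submodule.<name>.path <relative/path>'. A standalone sketch of that parsing, for a hypothetical repository directory:

    import subprocess

    def list_submodule_paths(repo_dir):
        # Raises CalledProcessError if .gitmodules has no path entries, which is why
        # the class only runs this when .gitmodules exists and is non-empty.
        out = subprocess.check_output(
            ["git", "config", "--file", ".gitmodules", "--get-regexp", "path"],
            cwd=repo_dir).decode("utf-8")
        return [line.rsplit(maxsplit=1)[1] for line in out.splitlines()]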
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
index 9b53dfba7a..731ea575bd 100644
--- a/meta/classes/fs-uuid.bbclass
+++ b/meta/classes/fs-uuid.bbclass
@@ -4,7 +4,7 @@
def get_rootfs_uuid(d):
import subprocess
rootfs = d.getVar('ROOTFS')
- output = subprocess.check_output(['tune2fs', '-l', rootfs])
+ output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True)
for line in output.split('\n'):
if line.startswith('Filesystem UUID:'):
uuid = line.split()[-1]
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
index 16e46398b1..21b1a0271e 100644
--- a/meta/classes/go.bbclass
+++ b/meta/classes/go.bbclass
@@ -118,7 +118,7 @@ go_do_install() {
tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
tar -C ${D}${libdir}/go --no-same-owner -xf -
- if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
+ if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then
install -d ${D}${bindir}
install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
fi
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 1900eff412..fbf7206d04 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -124,7 +124,7 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
@@ -176,6 +176,9 @@ IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
+# By default, create a locale archive
+IMAGE_LOCALES_ARCHIVE ?= '1'
+
# Prefer image, but use the fallback files for lookups if the image ones
# aren't yet available.
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
@@ -308,7 +311,7 @@ fakeroot python do_image_qa () {
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
except Exception as e:
- qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
+ qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (cmd, e)
if qamsg:
imgname = d.getVar('IMAGE_NAME')
@@ -434,7 +437,7 @@ python () {
localdata.delVar('DATETIME')
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude') or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index eb19425652..d6da53252f 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -452,12 +452,14 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
"""
Check for build paths inside target files and error if not found in the whitelist
"""
+ import stat
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
return
- # Ignore symlinks
- if os.path.islink(path):
+ # Ignore symlinks/devs/fifos
+ mode = os.lstat(path).st_mode
+ if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode):
return
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
@@ -945,7 +947,7 @@ def package_qa_check_host_user(path, name, d, elf, messages):
dest = d.getVar('PKGDEST')
pn = d.getVar('PN')
- home = os.path.join(dest, 'home')
+ home = os.path.join(dest, name, 'home')
if path == home or path.startswith(home + os.sep):
return
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index 07ec242e63..4cd08b96fb 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -61,8 +61,8 @@ HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}"
KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
-TOOLCHAIN = "gcc"
+TOOLCHAIN ?= "gcc"
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 5f5e9dd444..7c7bcd3fc0 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -1,5 +1,7 @@
inherit kernel-uboot kernel-artifact-names uboot-sign
+KERNEL_IMAGETYPE_REPLACEMENT = ""
+
python __anonymous () {
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
@@ -21,6 +23,8 @@ python __anonymous () {
else:
replacementtype = "zImage"
+ d.setVar("KERNEL_IMAGETYPE_REPLACEMENT", replacementtype)
+
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
# to kernel.bbclass . We have to override it, since we pack zImage
# (at least for now) into the fitImage .
@@ -45,6 +49,8 @@ python __anonymous () {
if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
# Options for the device tree compiler passed to mkimage '-D' feature:
@@ -56,6 +62,12 @@ FIT_HASH_ALG ?= "sha256"
# fitImage Signature Algo
FIT_SIGN_ALG ?= "rsa2048"
+# fitImage Padding Algo
+FIT_PAD_ALG ?= "pkcs-1.5"
+
+# Arguments passed to mkimage for signing
+UBOOT_MKIMAGE_SIGN_ARGS ?= ""
+
#
# Emit the fitImage ITS header
#
@@ -175,6 +187,43 @@ EOF
}
#
+# Emit the fitImage ITS u-boot script section
+#
+# $1 ... .its filename
+# $2 ... Image counter
+# $3 ... Path to boot script image
+fitimage_emit_section_boot_script() {
+
+ bootscr_csum="${FIT_HASH_ALG}"
+ bootscr_sign_algo="${FIT_SIGN_ALG}"
+ bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
+
+ cat << EOF >> $1
+ bootscr-$2 {
+ description = "U-boot script";
+ data = /incbin/("$3");
+ type = "script";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ hash-1 {
+ algo = "$bootscr_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$bootscr_csum,$bootscr_sign_algo";
+ key-name-hint = "$bootscr_sign_keyname";
+ };
+ };
+EOF
+ fi
+}
+
+#
# Emit the fitImage ITS setup section
#
# $1 ... .its filename
@@ -244,12 +293,14 @@ EOF
# $2 ... Linux kernel ID
# $3 ... DTB image name
# $4 ... ramdisk ID
-# $5 ... config ID
-# $6 ... default flag
+# $5 ... u-boot script ID
+# $6 ... config ID
+# $7 ... default flag
fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
conf_sign_algo="${FIT_SIGN_ALG}"
+ conf_padding_algo="${FIT_PAD_ALG}"
if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
@@ -260,6 +311,7 @@ fitimage_emit_section_config() {
kernel_line=""
fdt_line=""
ramdisk_line=""
+ bootscr_line=""
setup_line=""
default_line=""
@@ -282,21 +334,28 @@ fitimage_emit_section_config() {
fi
if [ -n "${5}" ]; then
+ conf_desc="${conf_desc}${sep}u-boot script"
+ sep=", "
+ bootscr_line="bootscr = \"bootscr-${5}\";"
+ fi
+
+ if [ -n "${6}" ]; then
conf_desc="${conf_desc}${sep}setup"
- setup_line="setup = \"setup-${5}\";"
+ setup_line="setup = \"setup-${6}\";"
fi
- if [ "${6}" = "1" ]; then
+ if [ "${7}" = "1" ]; then
default_line="default = \"conf-${3}\";"
fi
cat << EOF >> ${1}
${default_line}
conf-${3} {
- description = "${6} ${conf_desc}";
+ description = "${7} ${conf_desc}";
${kernel_line}
${fdt_line}
${ramdisk_line}
+ ${bootscr_line}
${setup_line}
hash-1 {
algo = "${conf_csum}";
@@ -324,6 +383,11 @@ EOF
fi
if [ -n "${5}" ]; then
+ sign_line="${sign_line}${sep}\"bootscr\""
+ sep=", "
+ fi
+
+ if [ -n "${6}" ]; then
sign_line="${sign_line}${sep}\"setup\""
fi
@@ -333,6 +397,7 @@ EOF
signature-1 {
algo = "${conf_csum},${conf_sign_algo}";
key-name-hint = "${conf_sign_keyname}";
+ padding = "${conf_padding_algo}";
${sign_line}
};
EOF
@@ -355,6 +420,7 @@ fitimage_assemble() {
DTBS=""
ramdiskcount=${3}
setupcount=""
+ bootscr_id=""
rm -f ${1} arch/${ARCH}/boot/${2}
fitimage_emit_fit_header ${1}
@@ -365,7 +431,7 @@ fitimage_assemble() {
fitimage_emit_section_maint ${1} imagestart
uboot_prep_kimage
- fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
+ fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
#
# Step 2: Prepare a DTB image section
@@ -399,7 +465,21 @@ fitimage_assemble() {
fi
#
- # Step 3: Prepare a setup section. (For x86)
+ # Step 3: Prepare a u-boot script section
+ #
+
+ if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
+ if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
+ cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
+ bootscr_id="${UBOOT_ENV_BINARY}"
+ fitimage_emit_section_boot_script ${1} "${bootscr_id}" ${UBOOT_ENV_BINARY}
+ else
+ bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
+ fi
+ fi
+
+ #
+ # Step 4: Prepare a setup section. (For x86)
#
if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
@@ -407,9 +487,9 @@ fitimage_assemble() {
fi
#
- # Step 4: Prepare a ramdisk section.
+ # Step 5: Prepare a ramdisk section.
#
- if [ "x${ramdiskcount}" = "x1" ] ; then
+ if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
# Find and use the first initramfs image archive type we find
for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
@@ -430,7 +510,7 @@ fitimage_assemble() {
fi
#
- # Step 5: Prepare a configurations section
+ # Step 6: Prepare a configurations section
#
fitimage_emit_section_maint ${1} confstart
@@ -439,9 +519,9 @@ fitimage_assemble() {
for DTB in ${DTBS}; do
dtb_ext=${DTB##*.}
if [ "${dtb_ext}" = "dtbo" ]; then
- fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config ${1} "" "${DTB}" "" "${bootscr_id}" "" "`expr ${i} = ${dtbcount}`"
else
- fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
fi
i=`expr ${i} + 1`
done
@@ -452,7 +532,7 @@ fitimage_assemble() {
fitimage_emit_section_maint ${1} fitend
#
- # Step 6: Assemble the image
+ # Step 7: Assemble the image
#
uboot-mkimage \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
@@ -460,7 +540,7 @@ fitimage_assemble() {
arch/${ARCH}/boot/${2}
#
- # Step 7: Sign the image and add public key to U-Boot dtb
+ # Step 8: Sign the image and add public key to U-Boot dtb
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
add_key_to_u_boot=""
@@ -474,7 +554,8 @@ fitimage_assemble() {
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
$add_key_to_u_boot \
- -r arch/${ARCH}/boot/${2}
+ -r arch/${ARCH}/boot/${2} \
+ ${UBOOT_MKIMAGE_SIGN_ARGS}
fi
}
@@ -491,7 +572,11 @@ do_assemble_fitimage_initramfs() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
test -n "${INITRAMFS_IMAGE}" ; then
cd ${B}
- fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
+ else
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ fi
fi
}
@@ -502,22 +587,32 @@ kernel_do_deploy[vardepsexclude] = "DATETIME"
kernel_do_deploy_append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fit-image.its source file..."
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ fi
- echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ echo "Copying linux.bin file..."
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
- echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
fi
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
# UBOOT_DTB_IMAGE is a realfile, but we can't use
@@ -527,3 +622,13 @@ kernel_do_deploy_append() {
fi
fi
}
+
+# The function below performs the following in case of initramfs bundles:
+# - Removes do_assemble_fitimage. FIT generation is done through
+# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
+# and should not be part of the tasks to be executed.
+python () {
+ d.appendVarFlag('do_compile', 'vardeps', ' INITRAMFS_IMAGE_BUNDLE')
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ bb.build.deltask('do_assemble_fitimage', d)
+}
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index a1a073b738..2abbc2ff66 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -194,7 +194,7 @@ do_kernel_metadata() {
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
for f in ${feat_dirs}; do
- if [ -d "${WORKDIR}/$f/meta" ]; then
+ if [ -d "${WORKDIR}/$f/kernel-meta" ]; then
includes="$includes -I${WORKDIR}/$f/kernel-meta"
elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
includes="$includes -I${WORKDIR}/../oe-local-files/$f"
@@ -269,6 +269,8 @@ do_kernel_metadata() {
bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL"
bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL"
fi
+
+ set -e
}
do_patch() {
@@ -298,6 +300,8 @@ do_patch() {
fi
done
fi
+
+ set -e
}
do_kernel_checkout() {
@@ -356,6 +360,8 @@ do_kernel_checkout() {
git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
git clean -d -f
fi
+
+ set -e
}
do_kernel_checkout[dirs] = "${S}"
@@ -523,6 +529,8 @@ do_validate_branches() {
kgit-s2q --clean
fi
fi
+
+ set -e
}
OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 2a3cb21fc0..ca7530095e 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -75,7 +75,7 @@ python __anonymous () {
# KERNEL_IMAGETYPES may contain a mixture of image types supported directly
# by the kernel build system and types which are created by post-processing
# the output of the kernel build system (e.g. compressing vmlinux ->
- # vmlinux.gz in kernel_do_compile()).
+ # vmlinux.gz in kernel_do_transform_kernel()).
# KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
# directly by the kernel build system.
if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
@@ -106,6 +106,8 @@ python __anonymous () {
# standalone for use by wic and other tools.
if image:
d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
+ bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)
# NOTE: setting INITRAMFS_TASK is for backward compatibility
# The preferred method is to set INITRAMFS_IMAGE, because
@@ -141,13 +143,14 @@ do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILD
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
python do_symlink_kernsrc () {
s = d.getVar("S")
- if s[-1] == '/':
- # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
- s=s[:-1]
kernsrc = d.getVar("STAGING_KERNEL_DIR")
if s != kernsrc:
bb.utils.mkdirhier(kernsrc)
bb.utils.remove(kernsrc, recurse=True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as
+ # directory name and fail
+ s = s[:-1]
if d.getVar("EXTERNALSRC"):
# With EXTERNALSRC S will not be wiped so we can symlink to it
os.symlink(s, kernsrc)
@@ -280,6 +283,14 @@ do_bundle_initramfs () {
}
do_bundle_initramfs[dirs] = "${B}"
+kernel_do_transform_bundled_initramfs() {
+ # vmlinux.gz is not built by kernel
+ if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
+ gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
+ fi
+}
+do_transform_bundled_initramfs[dirs] = "${B}"
+
python do_devshell_prepend () {
os.environ["LDFLAGS"] = ''
}
@@ -311,6 +322,10 @@ kernel_do_compile() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
# The $use_alternate_initrd is only set from
# do_bundle_initramfs() This variable is specifically for the
@@ -329,12 +344,17 @@ kernel_do_compile() {
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
+}
+
+kernel_do_transform_kernel() {
# vmlinux.gz is not built by kernel
if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
mkdir -p "${KERNEL_OUTPUT_DIR}"
gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
fi
}
+do_transform_kernel[dirs] = "${B}"
+addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
@@ -352,6 +372,10 @@ do_compile_kernelmodules() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
cc_extra=$(get_cc_option)
@@ -381,8 +405,8 @@ kernel_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
# If the kernel/ directory is empty remove it to prevent QA issues
rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
else
@@ -394,12 +418,26 @@ kernel_do_install() {
#
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
+
+ #
+ # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
+ # by do_assemble_fitimage_initramfs.
+ # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
+ # So, at the level of the install task, we should not try to install the fitImage; it has not been
+ # generated yet.
+ # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
+ # the deploy folder.
+ #
+
for imageType in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
- if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
- ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
+ if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
+ if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
+ ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
+ fi
fi
done
+
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
@@ -572,11 +610,11 @@ do_savedefconfig() {
do_savedefconfig[nostamp] = "1"
addtask savedefconfig after do_configure
-inherit cml1
+inherit cml1 pkgconfig
KCONFIG_CONFIG_COMMAND_append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
-EXPORT_FUNCTIONS do_compile do_install do_configure
+EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
@@ -721,7 +759,7 @@ kernel_do_deploy() {
fi
if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
- for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
+ for imageType in ${KERNEL_IMAGETYPES} ; do
if [ "$imageType" = "fitImage" ] ; then
continue
fi
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index 1143f538d6..72f489d673 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -45,6 +45,7 @@ PACKAGE_NO_GCONV ?= "0"
OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
+mkdir ${libdir}/locale
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index 6b03221c7f..806b5069fd 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -91,17 +91,17 @@ def copy_license_files(lic_files_paths, destdir):
os.link(src, dst)
except OSError as err:
if err.errno == errno.EXDEV:
- # Copy license files if hard-link is not possible even if st_dev is the
+ # Copy license files if hardlink is not possible even if st_dev is the
# same on source and destination (docker container with device-mapper?)
canlink = False
else:
raise
- # Only chown if we did hardling, and, we're running under pseudo
+ # Only chown if we did hardlink and we're running under pseudo
if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
os.chown(dst,0,0)
if not canlink:
- begin_idx = int(beginline)-1 if beginline is not None else None
- end_idx = int(endline) if endline is not None else None
+ begin_idx = max(0, int(beginline) - 1) if beginline is not None else None
+ end_idx = max(0, int(endline)) if endline is not None else None
if begin_idx is None and end_idx is None:
shutil.copyfile(src, dst)
else:
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index 9f3a0c3727..325b3cbba7 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -211,7 +211,7 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
taskdata = d.getVar("BB_TASKDEPDATA", False)
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
depends = list(set([dep[0] for dep
in list(taskdata.values())
if not dep[0].endswith("-native") and not dep[0] == pn]))
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 9a8b02d4f6..b5c59ac593 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -45,6 +45,7 @@ python multilib_virtclass_handler () {
e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
+ e.data.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
e.data.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index 7f2692c51a..dc5a9756b6 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -113,3 +113,5 @@ do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
+
+PATH_prepend = "${COREBASE}/scripts/nativesdk-intercept:"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index 702427fecc..49d30caef7 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -1140,6 +1140,14 @@ python split_and_strip_files () {
# Modified the file so clear the cache
cpath.updatecache(file)
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
#
# First lets process debug splitting
#
@@ -1153,6 +1161,8 @@ python split_and_strip_files () {
for file in staticlibs:
results.append( (file,source_info(file, d)) )
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
sources = set()
for r in results:
sources.update(r[1])
@@ -1460,6 +1470,7 @@ PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS
python emit_pkgdata() {
from glob import glob
import json
+ import gzip
def process_postinst_on_target(pkg, mlprefix):
pkgval = d.getVar('PKG_%s' % pkg)
@@ -1532,6 +1543,8 @@ fi
with open(data_file, 'w') as fd:
fd.write("PACKAGES: %s\n" % packages)
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
+
pn = d.getVar('PN')
global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
@@ -1551,17 +1564,32 @@ fi
pkgval = pkg
d.setVar('PKG_%s' % pkg, pkg)
+ extended_data = {
+ "files_info": {}
+ }
+
pkgdestpkg = os.path.join(pkgdest, pkg)
files = {}
+ files_extra = {}
total_size = 0
seen = set()
for f in pkgfiles[pkg]:
- relpth = os.path.relpath(f, pkgdestpkg)
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
fstat = os.lstat(f)
- files[os.sep + relpth] = fstat.st_size
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
if fstat.st_ino not in seen:
seen.add(fstat.st_ino)
total_size += fstat.st_size
+
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
@@ -1582,6 +1610,10 @@ fi
sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.gz" % pkg
+ with gzip.open(subdata_extended_file, "wt", encoding="utf-8") as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
+
# Symlinks needed for rprovides lookup
rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
@@ -1612,7 +1644,8 @@ fi
write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
}
-emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
+emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS"
ldconfig_postinst_fragment() {
if [ x"$D" = "x" ]; then
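For illustration only (not part of the patch): emit_pkgdata() above now also writes a gzip-compressed extended/<pkg>.json.gz per package, whose "files_info" maps each packaged file path to its size and, where known, its debug source files. A minimal sketch of reading one of these files, path assumed:

    import gzip
    import json

    def read_extended_pkgdata(path):
        with gzip.open(path, "rt", encoding="utf-8") as f:
            data = json.load(f)
        # Example shape: {"files_info": {"/usr/bin/foo": {"size": 1234,
        #                                                 "debugsrc": ["/usr/src/debug/..."]}}}
        return data["files_info"]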
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index 396792f0f7..49fdfaa93d 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -51,6 +51,8 @@ TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_ARCHIVE_TYPE ?= "tar.xz"
SDK_XZ_COMPRESSION_LEVEL ?= "-9"
SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
+SDK_ZIP_OPTIONS ?= "-y"
+
# To support different sdk type according to SDK_ARCHIVE_TYPE, now support zip and tar.xz
python () {
@@ -58,7 +60,7 @@ python () {
d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
# SDK_ARCHIVE_CMD used to generate archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR}
# recommand to cd into input dir first to avoid archive with buildpath
- d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
else:
d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index aa00d5397c..1bdfd92847 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -117,7 +117,7 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = " write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
@@ -363,7 +363,8 @@ python copy_buildsystem () {
f.write('BUILDCFG_HEADER = ""\n\n')
# Write METADATA_REVISION
- f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
+ # Needs distro override so it can override the value set in the bbclass code (later than local.conf)
+ f.write('METADATA_REVISION:%s = "%s"\n\n' % (d.getVar('DISTRO'), d.getVar('METADATA_REVISION')))
f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
f.write('WITHIN_EXT_SDK = "1"\n\n')
@@ -669,7 +670,7 @@ sdk_ext_postinst() {
# A bit of another hack, but we need this in the path only for devtool
# so put it at the end of $PATH.
- echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
+ echo "export PATH=\"$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH\"" >> $env_setup_script
echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
index 87b4c85fc0..c68367449a 100644
--- a/meta/classes/pypi.bbclass
+++ b/meta/classes/pypi.bbclass
@@ -24,3 +24,5 @@ S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
+
+CVE_PRODUCT ?= "python:${PYPI_PACKAGE}"
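The ?= default means CVE_PRODUCT is derived from the recipe's PYPI_PACKAGE unless the recipe sets it explicitly; as a hypothetical example, a recipe with

    PYPI_PACKAGE = "requests"

would report CVE_PRODUCT = "python:requests" to cve-check, while an existing CVE_PRODUCT assignment in the recipe still takes precedence.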
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index 648af09b6e..92ae69d9f2 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -7,6 +7,7 @@
# QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor"
#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+# e.g., "bzImage-initramfs-qemux86-64.bin" if INITRAMFS_IMAGE_BUNDLE is set to 1.
#
# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
#
@@ -75,7 +76,7 @@
QB_MEM ?= "-m 256"
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
-QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
+QB_DEFAULT_KERNEL ?= "${@bb.utils.contains("INITRAMFS_IMAGE_BUNDLE", "1", "${KERNEL_IMAGETYPE}-${INITRAMFS_LINK_NAME}.bin", "${KERNEL_IMAGETYPE}", d)}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_OPT_APPEND ?= "-show-cursor"
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
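Worked expansion of the new QB_DEFAULT_KERNEL default, assuming KERNEL_IMAGETYPE = "bzImage" and INITRAMFS_LINK_NAME = "initramfs-qemux86-64" as in the comment above:

    INITRAMFS_IMAGE_BUNDLE = "1"   ->  QB_DEFAULT_KERNEL = "bzImage-initramfs-qemux86-64.bin"
    INITRAMFS_IMAGE_BUNDLE unset   ->  QB_DEFAULT_KERNEL = "bzImage"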
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 2d5a56c238..24051aa378 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -27,6 +27,13 @@ BB_SCHEDULER ?= "completion"
BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
do_rm_work () {
+ # Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH
+ # Avoids a race condition accessing 'rm' when deleting WORKDIR folders at the end of this function
+ RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
+ if [ -z "${RM_BIN}" ]; then
+ bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
+ fi
+
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
if [ "$p" = "${PN}" ]; then
@@ -73,7 +80,7 @@ do_rm_work () {
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- rm -f -- $i;
+ "${RM_BIN}" -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
@@ -90,7 +97,7 @@ do_rm_work () {
;;
esac
done
- rm -f -- $i
+ "${RM_BIN}" -f -- $i
esac
done
@@ -100,9 +107,9 @@ do_rm_work () {
# Retain only logs and other files in temp, safely ignore
# failures of removing pseudo folders on NFS2/3 servers.
if [ $dir = 'pseudo' ]; then
- rm -rf -- $dir 2> /dev/null || true
+ "${RM_BIN}" -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf -- $dir
+ "${RM_BIN}" -rf -- $dir
fi
done
}
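As a sketch of what the RM_BIN lookup resolves to (assuming the usual HOSTTOOLS_DIR location under TMPDIR):

    $ PATH=tmp/hosttools command -v rm
    tmp/hosttools/rm

i.e. the hosttools symlink to the build host's rm, so the removals above no longer depend on whether the recipe-sysroot-native rm is still present when PATH is searched.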
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index 0fef52af40..943534c57a 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -1,6 +1,6 @@
# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
@@ -12,7 +12,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
@@ -26,7 +26,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only
APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
# Generates test data file with data store variables expanded in json format
-ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
# Write manifest
IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
@@ -305,7 +305,7 @@ rootfs_trim_schemas () {
}
rootfs_check_host_user_contaminated () {
- contaminated="${WORKDIR}/host-user-contaminated.txt"
+ contaminated="${S}/host-user-contaminated.txt"
HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
index e2ba4e3647..85c7ec7434 100644
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ b/meta/classes/rootfsdebugfiles.bbclass
@@ -28,7 +28,7 @@
ROOTFS_DEBUG_FILES ?= ""
ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
rootfs_debug_files () {
#!/bin/sh -e
echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 37354af9d5..33e5e5952f 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -561,6 +561,14 @@ def check_tar_version(sanity_data):
version = result.split()[3]
if LooseVersion(version) < LooseVersion("1.28"):
return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
+
+ try:
+ result = subprocess.check_output(["tar", "--help"], stderr=subprocess.STDOUT).decode('utf-8')
+ if "--xattrs" not in result:
+ return "Your tar doesn't support --xattrs, please use GNU tar.\n"
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute tar --help, exit code %d\n%s\n" % (e.returncode, e.output)
+
return None
# We use git parameters and functionality only found in 1.7.8 or later
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 3d6fb84d63..1058778980 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -20,7 +20,7 @@ def generate_sstatefn(spec, hash, taskname, siginfo, d):
components = spec.split(":")
# Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
# 7 is for the separators
- avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
components[2] = components[2][:avail]
components[3] = components[3][:avail]
components[4] = components[4][:avail]
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index 78eb914921..21523c8f75 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -267,6 +267,10 @@ python extend_recipe_sysroot() {
pn = d.getVar("PN")
stagingdir = d.getVar("STAGING_DIR")
sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
+ # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT
+ manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR")
+ if manifestprefix:
+ sharedmanifests = sharedmanifests + "/" + manifestprefix
recipesysroot = d.getVar("RECIPE_SYSROOT")
recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
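For illustration, if a multilib cross-canadian variant sets a hypothetical RECIPE_SYSROOT_MANIFEST_SUBDIR = "x86_64-nativesdk", the shared manifest path becomes

    ${COMPONENTS_DIR}/manifests/x86_64-nativesdk

instead of ${COMPONENTS_DIR}/manifests, keeping its manifests apart from those written for the default RECIPE_SYSROOT.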
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index b1aef626f7..7c8b2b30a1 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -99,30 +99,9 @@ TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
testimage_dump_target () {
- top -bn1
- ps
- free
- df
- # The next command will export the default gateway IP
- export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
- ping -c3 $DEFAULT_GATEWAY
- dmesg
- netstat -an
- ip address
- # Next command will dump logs from /var/log/
- find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
}
testimage_dump_host () {
- top -bn1
- iostat -x -z -N -d -p ALL 20 2
- ps -ef
- free
- df
- memstat
- dmesg
- ip -s link
- netstat -an
}
python do_testimage() {
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index db1d3215ef..21762b803b 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -29,7 +29,7 @@ toolchain_create_sdk_env_script () {
echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
echo '# Only disable this check if you absolutely know what you are doing!' >> $script
- echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
+ echo 'if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then' >> $script
echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
@@ -44,7 +44,7 @@ toolchain_create_sdk_env_script () {
for i in ${CANADIANEXTRAOS}; do
EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
- echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':"$PATH"' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 4412d7c567..4d4f53ad4d 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -34,6 +34,8 @@ python uninative_event_fetchloader() {
with open(loaderchksum, "r") as f:
readchksum = f.read().strip()
if readchksum == chksum:
+ if "uninative" not in d.getVar("SSTATEPOSTUNPACKFUNCS"):
+ enable_uninative(d)
return
import subprocess
@@ -167,5 +169,7 @@ python uninative_changeinterp () {
if not elf.isDynamic():
continue
+ os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR)
subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
+ os.chmod(f, s[stat.ST_MODE])
}
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3a1b5f1320..908b24969f 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -41,7 +41,7 @@ def update_useradd_static_config(d):
def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
- msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id)
+ msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
if files:
msg += " Add %s to one of these files: %s" % (id, files)
else:
diff --git a/meta/conf/bitbake.conf b/meta/conf/bitbake.conf
index 91f003d6dd..457b7790c2 100644
--- a/meta/conf/bitbake.conf
+++ b/meta/conf/bitbake.conf
@@ -897,7 +897,7 @@ BB_HASHCONFIG_WHITELIST ?= "${BB_HASHEXCLUDE_COMMON} DATE TIME SSH_AGENT_PID \
PARALLEL_MAKE BB_NUMBER_THREADS BB_ORIGENV BB_INVALIDCONF BBINCLUDED \
GIT_PROXY_COMMAND ALL_PROXY all_proxy NO_PROXY no_proxy FTP_PROXY ftp_proxy \
HTTP_PROXY http_proxy HTTPS_PROXY https_proxy SOCKS5_USER SOCKS5_PASSWD \
- BB_SETSCENE_ENFORCE BB_CMDLINE BB_SERVER_TIMEOUT"
+ BB_SETSCENE_ENFORCE BB_CMDLINE BB_SERVER_TIMEOUT BB_NICE_LEVEL"
BB_SIGNATURE_EXCLUDE_FLAGS ?= "doc deps depends \
lockfiles type vardepsexclude vardeps vardepvalue vardepvalueexclude \
file-checksums python func task export unexport noexec nostamp dirs cleandirs \
diff --git a/meta/conf/distro/include/cve-extra-exclusions.inc b/meta/conf/distro/include/cve-extra-exclusions.inc
index e02a4d1fde..f3490db9dd 100644
--- a/meta/conf/distro/include/cve-extra-exclusions.inc
+++ b/meta/conf/distro/include/cve-extra-exclusions.inc
@@ -53,24 +53,23 @@ CVE-2015-4778 CVE-2015-4779 CVE-2015-4780 CVE-2015-4781 CVE-2015-4782 CVE-2015-4
CVE-2015-4785 CVE-2015-4786 CVE-2015-4787 CVE-2015-4788 CVE-2015-4789 CVE-2015-4790 CVE-2016-0682 \
CVE-2016-0689 CVE-2016-0692 CVE-2016-0694 CVE-2016-3418 CVE-2020-2981"
-#### CPE update pending ####
-
-# groff:groff-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2000-0803
-# Appears it was fixed in https://git.savannah.gnu.org/cgit/groff.git/commit/?id=07f95f1674217275ed4612f1dcaa95a88435c6a7
-# so from 1.17 onwards. Reported to the database for update by RP 2021/5/9. Update accepted 2021/5/10.
-#CVE_CHECK_WHITELIST += "CVE-2000-0803"
-
-
-
-#### Upstream still working on ####
-
# qemu:qemu-native:qemu-system-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2021-20255
# There was a proposed patch https://lists.gnu.org/archive/html/qemu-devel/2021-02/msg06098.html
-# however qemu maintainers are sure the patch is incorrect and should not be applied.
-
-# wget https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2021-31879
-# https://mail.gnu.org/archive/html/bug-wget/2021-02/msg00002.html
-# No response upstream as of 2021/5/12
+# qemu maintainers say the patch is incorrect and should not be applied
+# Ignore from OE's perspective as the issue is of low impact, at worst sitting in an infinite loop rather than being exploitable
+CVE_CHECK_WHITELIST += "CVE-2021-20255"
+
+# qemu:qemu-native:qemu-system-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2019-12067
+# There was a proposed patch but it was rejected by upstream qemu. It is unclear if the issue can
+# still be reproduced or where exactly any bug is.
+# Ignore from OE's perspective as we'll pick up any fix when upstream accepts one.
+CVE_CHECK_WHITELIST += "CVE-2019-12067"
+
+# nasm:nasm-native https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2020-18974
+# It is a fuzzing-related buffer overflow. It is of low impact since most devices
+# wouldn't expose an assembler. Upstream is inactive and there is little to be
+# done about the bug, so ignore it from an OE perspective.
+CVE_CHECK_WHITELIST += "CVE-2020-18974"
diff --git a/meta/conf/distro/include/maintainers.inc b/meta/conf/distro/include/maintainers.inc
index 1575fce8c7..11a35a2c59 100644
--- a/meta/conf/distro/include/maintainers.inc
+++ b/meta/conf/distro/include/maintainers.inc
@@ -194,7 +194,7 @@ RECIPE_MAINTAINER_pn-gcc-cross-canadian-${TRANSLATED_TARGET_ARCH} = "Khem Raj <r
RECIPE_MAINTAINER_pn-gcc-crosssdk-${SDK_SYS} = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-gcc-runtime = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-gcc-sanitizers = "Khem Raj <raj.khem@gmail.com>"
-RECIPE_MAINTAINER_pn-gcc-source-9.3.0 = "Khem Raj <raj.khem@gmail.com>"
+RECIPE_MAINTAINER_pn-gcc-source-9.5.0 = "Khem Raj <raj.khem@gmail.com>"
RECIPE_MAINTAINER_pn-gconf = "Ross Burton <ross.burton@arm.com>"
RECIPE_MAINTAINER_pn-gcr = "Alexander Kanavin <alex.kanavin@gmail.com>"
RECIPE_MAINTAINER_pn-gdb = "Khem Raj <raj.khem@gmail.com>"
diff --git a/meta/conf/distro/include/ptest-packagelists.inc b/meta/conf/distro/include/ptest-packagelists.inc
index badfd69325..3fb7ec2657 100644
--- a/meta/conf/distro/include/ptest-packagelists.inc
+++ b/meta/conf/distro/include/ptest-packagelists.inc
@@ -26,6 +26,7 @@ PTESTS_FAST = "\
liberror-perl-ptest \
libmodule-build-perl-ptest \
libpcre-ptest \
+ libpng-ptest \
libtimedate-perl-ptest \
libtest-needs-perl-ptest \
liburi-perl-ptest \
diff --git a/meta/conf/distro/include/yocto-uninative.inc b/meta/conf/distro/include/yocto-uninative.inc
index 411fe45a24..4ac66fd506 100644
--- a/meta/conf/distro/include/yocto-uninative.inc
+++ b/meta/conf/distro/include/yocto-uninative.inc
@@ -6,10 +6,10 @@
# to the distro running on the build machine.
#
-UNINATIVE_MAXGLIBCVERSION = "2.35"
-UNINATIVE_VERSION = "3.6"
+UNINATIVE_MAXGLIBCVERSION = "2.39"
+UNINATIVE_VERSION = "4.4"
UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/${UNINATIVE_VERSION}/"
-UNINATIVE_CHECKSUM[aarch64] ?= "d64831cf2792c8e470c2e42230660e1a8e5de56a579cdd59978791f663c2f3ed"
-UNINATIVE_CHECKSUM[i686] ?= "2f0ee9b66b1bb2c85e2b592fb3c9c7f5d77399fa638d74961330cdb8de34ca3b"
-UNINATIVE_CHECKSUM[x86_64] ?= "9bfc4c970495b3716b2f9e52c4df9f968c02463a9a95000f6657fbc3fde1f098"
+UNINATIVE_CHECKSUM[aarch64] ?= "b61876130f494f75092f21086b4a64ea5fb064045769bf1d32e9cb6af17ea8ec"
+UNINATIVE_CHECKSUM[i686] ?= "9f28627828f0082cc0344eede4d9a861a9a064bfa8f36e072e46212f0fe45fcc"
+UNINATIVE_CHECKSUM[x86_64] ?= "d81c54284be2bb886931fc87281d58177a2cd381cf99d1981f8923039a72a302"
diff --git a/meta/conf/licenses.conf b/meta/conf/licenses.conf
index 5b309eb385..c78823e847 100644
--- a/meta/conf/licenses.conf
+++ b/meta/conf/licenses.conf
@@ -13,24 +13,39 @@
SPDXLICENSEMAP[AGPL-3] = "AGPL-3.0"
SPDXLICENSEMAP[AGPLv3] = "AGPL-3.0"
SPDXLICENSEMAP[AGPLv3.0] = "AGPL-3.0"
+SPDXLICENSEMAP[AGPL-3.0-only] = "AGPL-3.0"
# GPL variations
SPDXLICENSEMAP[GPL-1] = "GPL-1.0"
SPDXLICENSEMAP[GPLv1] = "GPL-1.0"
SPDXLICENSEMAP[GPLv1.0] = "GPL-1.0"
+SPDXLICENSEMAP[GPL-1.0-only] = "GPL-1.0"
SPDXLICENSEMAP[GPL-2] = "GPL-2.0"
SPDXLICENSEMAP[GPLv2] = "GPL-2.0"
+SPDXLICENSEMAP[GPLv2+] = "GPL-2.0+"
SPDXLICENSEMAP[GPLv2.0] = "GPL-2.0"
+SPDXLICENSEMAP[GPLv2.0+] = "GPL-2.0+"
+SPDXLICENSEMAP[GPL-2.0-only] = "GPL-2.0"
SPDXLICENSEMAP[GPL-3] = "GPL-3.0"
SPDXLICENSEMAP[GPLv3] = "GPL-3.0"
+SPDXLICENSEMAP[GPLv3+] = "GPL-3.0+"
SPDXLICENSEMAP[GPLv3.0] = "GPL-3.0"
+SPDXLICENSEMAP[GPLv3.0+] = "GPL-3.0+"
+SPDXLICENSEMAP[GPL-3.0-only] = "GPL-3.0"
#LGPL variations
SPDXLICENSEMAP[LGPLv2] = "LGPL-2.0"
+SPDXLICENSEMAP[LGPLv2+] = "LGPL-2.0+"
SPDXLICENSEMAP[LGPLv2.0] = "LGPL-2.0"
+SPDXLICENSEMAP[LGPLv2.0+] = "LGPL-2.0+"
+SPDXLICENSEMAP[LGPL-2.0-only] = "LGPL-2.0"
SPDXLICENSEMAP[LGPL2.1] = "LGPL-2.1"
SPDXLICENSEMAP[LGPLv2.1] = "LGPL-2.1"
+SPDXLICENSEMAP[LGPLv2.1+] = "LGPL-2.1+"
+SPDXLICENSEMAP[LGPL-2.1-only] = "LGPL-2.1"
SPDXLICENSEMAP[LGPLv3] = "LGPL-3.0"
+SPDXLICENSEMAP[LGPLv3+] = "LGPL-3.0+"
+SPDXLICENSEMAP[LGPL-3.0-only] = "LGPL-3.0"
#MPL variations
SPDXLICENSEMAP[MPL-1] = "MPL-1.0"
diff --git a/meta/files/spdx-licenses.json b/meta/files/spdx-licenses.json
new file mode 100644
index 0000000000..ef926164ec
--- /dev/null
+++ b/meta/files/spdx-licenses.json
@@ -0,0 +1,5937 @@
+{
+ "licenseListVersion": "3.14",
+ "licenses": [
+ {
+ "reference": "https://spdx.org/licenses/GPL-1.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-1.0.json",
+ "referenceNumber": 0,
+ "name": "GNU General Public License v1.0 only",
+ "licenseId": "GPL-1.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/bzip2-1.0.6.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/bzip2-1.0.6.json",
+ "referenceNumber": 1,
+ "name": "bzip2 and libbzip2 License v1.0.6",
+ "licenseId": "bzip2-1.0.6",
+ "seeAlso": [
+ "https://sourceware.org/git/?p\u003dbzip2.git;a\u003dblob;f\u003dLICENSE;hb\u003dbzip2-1.0.6",
+ "http://bzip.org/1.0.5/bzip2-manual-1.0.5.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Intel-ACPI.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Intel-ACPI.json",
+ "referenceNumber": 2,
+ "name": "Intel ACPI Software License Agreement",
+ "licenseId": "Intel-ACPI",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Intel_ACPI_Software_License_Agreement"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/XSkat.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/XSkat.json",
+ "referenceNumber": 3,
+ "name": "XSkat License",
+ "licenseId": "XSkat",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/XSkat_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-2.0.json",
+ "referenceNumber": 4,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 2.0 Generic",
+ "licenseId": "CC-BY-NC-SA-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Plexus.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Plexus.json",
+ "referenceNumber": 5,
+ "name": "Plexus Classworlds License",
+ "licenseId": "Plexus",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Plexus_Classworlds_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Giftware.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Giftware.json",
+ "referenceNumber": 6,
+ "name": "Giftware License",
+ "licenseId": "Giftware",
+ "seeAlso": [
+ "http://liballeg.org/license.html#allegro-4-the-giftware-license"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BitTorrent-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BitTorrent-1.0.json",
+ "referenceNumber": 7,
+ "name": "BitTorrent Open Source License v1.0",
+ "licenseId": "BitTorrent-1.0",
+ "seeAlso": [
+ "http://sources.gentoo.org/cgi-bin/viewvc.cgi/gentoo-x86/licenses/BitTorrent?r1\u003d1.1\u0026r2\u003d1.1.1.1\u0026diff_format\u003ds"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/APSL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APSL-1.1.json",
+ "referenceNumber": 8,
+ "name": "Apple Public Source License 1.1",
+ "licenseId": "APSL-1.1",
+ "seeAlso": [
+ "http://www.opensource.apple.com/source/IOSerialFamily/IOSerialFamily-7/APPLE_LICENSE"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-GCC-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-GCC-exception.json",
+ "referenceNumber": 9,
+ "name": "GNU General Public License v2.0 w/GCC Runtime Library exception",
+ "licenseId": "GPL-2.0-with-GCC-exception",
+ "seeAlso": [
+ "https://gcc.gnu.org/git/?p\u003dgcc.git;a\u003dblob;f\u003dgcc/libgcc1.c;h\u003d762f5143fc6eed57b6797c82710f3538aa52b40b;hb\u003dcb143a3ce4fb417c68f5fa2691a1b1b1053dfba9#l10"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/UPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/UPL-1.0.json",
+ "referenceNumber": 10,
+ "name": "Universal Permissive License v1.0",
+ "licenseId": "UPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/UPL"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/wxWindows.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/wxWindows.json",
+ "referenceNumber": 11,
+ "name": "wxWindows Library License",
+ "licenseId": "wxWindows",
+ "seeAlso": [
+ "https://opensource.org/licenses/WXwindows"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Caldera.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Caldera.json",
+ "referenceNumber": 12,
+ "name": "Caldera License",
+ "licenseId": "Caldera",
+ "seeAlso": [
+ "http://www.lemis.com/grog/UNIX/ancient-source-all.pdf"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zend-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zend-2.0.json",
+ "referenceNumber": 13,
+ "name": "Zend License v2.0",
+ "licenseId": "Zend-2.0",
+ "seeAlso": [
+ "https://web.archive.org/web/20130517195954/http://www.zend.com/license/2_00.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CUA-OPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CUA-OPL-1.0.json",
+ "referenceNumber": 14,
+ "name": "CUA Office Public License v1.0",
+ "licenseId": "CUA-OPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/CUA-OPL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/JPNIC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/JPNIC.json",
+ "referenceNumber": 15,
+ "name": "Japan Network Information Center License",
+ "licenseId": "JPNIC",
+ "seeAlso": [
+ "https://gitlab.isc.org/isc-projects/bind9/blob/master/COPYRIGHT#L366"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SAX-PD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SAX-PD.json",
+ "referenceNumber": 16,
+ "name": "Sax Public Domain Notice",
+ "licenseId": "SAX-PD",
+ "seeAlso": [
+ "http://www.saxproject.org/copying.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-2.5.json",
+ "referenceNumber": 17,
+ "name": "Creative Commons Attribution No Derivatives 2.5 Generic",
+ "licenseId": "CC-BY-ND-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/eGenix.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/eGenix.json",
+ "referenceNumber": 18,
+ "name": "eGenix.com Public License 1.1.0",
+ "licenseId": "eGenix",
+ "seeAlso": [
+ "http://www.egenix.com/products/eGenix.com-Public-License-1.1.0.pdf",
+ "https://fedoraproject.org/wiki/Licensing/eGenix.com_Public_License_1.1.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPLLR.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPLLR.json",
+ "referenceNumber": 19,
+ "name": "Lesser General Public License For Linguistic Resources",
+ "licenseId": "LGPLLR",
+ "seeAlso": [
+ "http://www-igm.univ-mlv.fr/~unitex/lgpllr.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.2.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.2.2.json",
+ "referenceNumber": 20,
+ "name": "Open LDAP Public License 2.2.2",
+ "licenseId": "OLDAP-2.2.2",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003ddf2cc1e21eb7c160695f5b7cffd6296c151ba188"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-3.0-DE.json",
+ "referenceNumber": 21,
+ "name": "Creative Commons Attribution No Derivatives 3.0 Germany",
+ "licenseId": "CC-BY-ND-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/IPA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/IPA.json",
+ "referenceNumber": 22,
+ "name": "IPA Font License",
+ "licenseId": "IPA",
+ "seeAlso": [
+ "https://opensource.org/licenses/IPA"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NCSA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NCSA.json",
+ "referenceNumber": 23,
+ "name": "University of Illinois/NCSA Open Source License",
+ "licenseId": "NCSA",
+ "seeAlso": [
+ "http://otm.illinois.edu/uiuc_openSource",
+ "https://opensource.org/licenses/NCSA"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/W3C.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/W3C.json",
+ "referenceNumber": 24,
+ "name": "W3C Software Notice and License (2002-12-31)",
+ "licenseId": "W3C",
+ "seeAlso": [
+ "http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231.html",
+ "https://opensource.org/licenses/W3C"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Adobe-2006.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Adobe-2006.json",
+ "referenceNumber": 25,
+ "name": "Adobe Systems Incorporated Source Code License Agreement",
+ "licenseId": "Adobe-2006",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/AdobeLicense"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Net-SNMP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Net-SNMP.json",
+ "referenceNumber": 26,
+ "name": "Net-SNMP License",
+ "licenseId": "Net-SNMP",
+ "seeAlso": [
+ "http://net-snmp.sourceforge.net/about/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-4.0.json",
+ "referenceNumber": 27,
+ "name": "Creative Commons Attribution Share Alike 4.0 International",
+ "licenseId": "CC-BY-SA-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/4.0/legalcode"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/YPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/YPL-1.0.json",
+ "referenceNumber": 28,
+ "name": "Yahoo! Public License v1.0",
+ "licenseId": "YPL-1.0",
+ "seeAlso": [
+ "http://www.zimbra.com/license/yahoo_public_license_1.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Nunit.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/Nunit.json",
+ "referenceNumber": 29,
+ "name": "Nunit License",
+ "licenseId": "Nunit",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Nunit"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MITNFA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MITNFA.json",
+ "referenceNumber": 30,
+ "name": "MIT +no-false-attribs license",
+ "licenseId": "MITNFA",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MITNFA"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PHP-3.01.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PHP-3.01.json",
+ "referenceNumber": 31,
+ "name": "PHP License v3.01",
+ "licenseId": "PHP-3.01",
+ "seeAlso": [
+ "http://www.php.net/license/3_01.txt"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-Source-Code.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-Source-Code.json",
+ "referenceNumber": 32,
+ "name": "BSD Source Code Attribution",
+ "licenseId": "BSD-Source-Code",
+ "seeAlso": [
+ "https://github.com/robbiehanson/CocoaHTTPServer/blob/master/LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-2.5.json",
+ "referenceNumber": 33,
+ "name": "Creative Commons Attribution Share Alike 2.5 Generic",
+ "licenseId": "CC-BY-SA-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Motosoto.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Motosoto.json",
+ "referenceNumber": 34,
+ "name": "Motosoto License",
+ "licenseId": "Motosoto",
+ "seeAlso": [
+ "https://opensource.org/licenses/Motosoto"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-1.1.json",
+ "referenceNumber": 35,
+ "name": "Open Software License 1.1",
+ "licenseId": "OSL-1.1",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/OSL1.1"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NGPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NGPL.json",
+ "referenceNumber": 36,
+ "name": "Nethack General Public License",
+ "licenseId": "NGPL",
+ "seeAlso": [
+ "https://opensource.org/licenses/NGPL"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-2.5-AU.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-2.5-AU.json",
+ "referenceNumber": 37,
+ "name": "Creative Commons Attribution 2.5 Australia",
+ "licenseId": "CC-BY-2.5-AU",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/2.5/au/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Unicode-TOU.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Unicode-TOU.json",
+ "referenceNumber": 38,
+ "name": "Unicode Terms of Use",
+ "licenseId": "Unicode-TOU",
+ "seeAlso": [
+ "http://www.unicode.org/copyright.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-License.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-License.json",
+ "referenceNumber": 39,
+ "name": "BSD 3-Clause No Nuclear License",
+ "licenseId": "BSD-3-Clause-No-Nuclear-License",
+ "seeAlso": [
+ "http://download.oracle.com/otn-pub/java/licenses/bsd.txt?AuthParam\u003d1467140197_43d516ce1776bd08a58235a7785be1cc"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OPUBL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OPUBL-1.0.json",
+ "referenceNumber": 40,
+ "name": "Open Publication License v1.0",
+ "licenseId": "OPUBL-1.0",
+ "seeAlso": [
+ "http://opencontent.org/openpub/",
+ "https://www.debian.org/opl",
+ "https://www.ctan.org/license/opl"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-2.0-UK.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-2.0-UK.json",
+ "referenceNumber": 41,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 2.0 England and Wales",
+ "licenseId": "CC-BY-NC-SA-2.0-UK",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/2.0/uk/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NLOD-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NLOD-2.0.json",
+ "referenceNumber": 42,
+ "name": "Norwegian Licence for Open Government Data (NLOD) 2.0",
+ "licenseId": "NLOD-2.0",
+ "seeAlso": [
+ "http://data.norge.no/nlod/en/2.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/gnuplot.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/gnuplot.json",
+ "referenceNumber": 43,
+ "name": "gnuplot License",
+ "licenseId": "gnuplot",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Gnuplot"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/EPICS.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EPICS.json",
+ "referenceNumber": 44,
+ "name": "EPICS Open License",
+ "licenseId": "EPICS",
+ "seeAlso": [
+ "https://epics.anl.gov/license/open.php"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Info-ZIP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Info-ZIP.json",
+ "referenceNumber": 45,
+ "name": "Info-ZIP License",
+ "licenseId": "Info-ZIP",
+ "seeAlso": [
+ "http://www.info-zip.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.0.json",
+ "referenceNumber": 46,
+ "name": "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)",
+ "licenseId": "OLDAP-2.0",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003dcbf50f4e1185a21abd4c0a54d3f4341fe28f36ea"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-P-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-P-2.0.json",
+ "referenceNumber": 47,
+ "name": "CERN Open Hardware Licence Version 2 - Permissive",
+ "licenseId": "CERN-OHL-P-2.0",
+ "seeAlso": [
+ "https://www.ohwr.org/project/cernohl/wikis/Documents/CERN-OHL-version-2"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-Warranty.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-Warranty.json",
+ "referenceNumber": 48,
+ "name": "BSD 3-Clause No Nuclear Warranty",
+ "licenseId": "BSD-3-Clause-No-Nuclear-Warranty",
+ "seeAlso": [
+ "https://jogamp.org/git/?p\u003dgluegen.git;a\u003dblob_plain;f\u003dLICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AML.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AML.json",
+ "referenceNumber": 49,
+ "name": "Apple MIT License",
+ "licenseId": "AML",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Apple_MIT_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MulanPSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MulanPSL-1.0.json",
+ "referenceNumber": 50,
+ "name": "Mulan Permissive Software License, Version 1",
+ "licenseId": "MulanPSL-1.0",
+ "seeAlso": [
+ "https://license.coscl.org.cn/MulanPSL/",
+ "https://github.com/yuwenlong/longphp/blob/25dfb70cc2a466dc4bb55ba30901cbce08d164b5/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Multics.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Multics.json",
+ "referenceNumber": 51,
+ "name": "Multics License",
+ "licenseId": "Multics",
+ "seeAlso": [
+ "https://opensource.org/licenses/Multics"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/VSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/VSL-1.0.json",
+ "referenceNumber": 52,
+ "name": "Vovida Software License v1.0",
+ "licenseId": "VSL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/VSL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/RSA-MD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RSA-MD.json",
+ "referenceNumber": 53,
+ "name": "RSA Message-Digest License",
+ "licenseId": "RSA-MD",
+ "seeAlso": [
+ "http://www.faqs.org/rfcs/rfc1321.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-PDDC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-PDDC.json",
+ "referenceNumber": 54,
+ "name": "Creative Commons Public Domain Dedication and Certification",
+ "licenseId": "CC-PDDC",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/publicdomain/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-2.1-JP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-2.1-JP.json",
+ "referenceNumber": 55,
+ "name": "Creative Commons Attribution Share Alike 2.1 Japan",
+ "licenseId": "CC-BY-SA-2.1-JP",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/2.1/jp/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.2.json",
+ "referenceNumber": 56,
+ "name": "LaTeX Project Public License v1.2",
+ "licenseId": "LPPL-1.2",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-2.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Spencer-94.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Spencer-94.json",
+ "referenceNumber": 57,
+ "name": "Spencer License 94",
+ "licenseId": "Spencer-94",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Henry_Spencer_Reg-Ex_Library_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-1.2.json",
+ "referenceNumber": 58,
+ "name": "Open LDAP Public License v1.2",
+ "licenseId": "OLDAP-1.2",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d42b0383c50c299977b5893ee695cf4e486fb0dc7"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/O-UDA-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/O-UDA-1.0.json",
+ "referenceNumber": 59,
+ "name": "Open Use of Data Agreement v1.0",
+ "licenseId": "O-UDA-1.0",
+ "seeAlso": [
+ "https://github.com/microsoft/Open-Use-of-Data-Agreement/blob/v1.0/O-UDA-1.0.md",
+ "https://cdla.dev/open-use-of-data-agreement-v1-0/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.7.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.7.json",
+ "referenceNumber": 60,
+ "name": "Open LDAP Public License v2.7",
+ "licenseId": "OLDAP-2.7",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d47c2415c1df81556eeb39be6cad458ef87c534a2"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Glulxe.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Glulxe.json",
+ "referenceNumber": 61,
+ "name": "Glulxe License",
+ "licenseId": "Glulxe",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Glulxe"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/iMatix.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/iMatix.json",
+ "referenceNumber": 62,
+ "name": "iMatix Standard Function Library Agreement",
+ "licenseId": "iMatix",
+ "seeAlso": [
+ "http://legacy.imatix.com/html/sfl/sfl4.htm#license"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/TAPR-OHL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TAPR-OHL-1.0.json",
+ "referenceNumber": 63,
+ "name": "TAPR Open Hardware License v1.0",
+ "licenseId": "TAPR-OHL-1.0",
+ "seeAlso": [
+ "https://www.tapr.org/OHL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NBPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NBPL-1.0.json",
+ "referenceNumber": 64,
+ "name": "Net Boolean Public License v1",
+ "licenseId": "NBPL-1.0",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d37b4b3f6cc4bf34e1d3dec61e69914b9819d8894"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LiLiQ-R-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LiLiQ-R-1.1.json",
+ "referenceNumber": 65,
+ "name": "Licence Libre du Québec – Réciprocité version 1.1",
+ "licenseId": "LiLiQ-R-1.1",
+ "seeAlso": [
+ "https://www.forge.gouv.qc.ca/participez/licence-logicielle/licence-libre-du-quebec-liliq-en-francais/licence-libre-du-quebec-reciprocite-liliq-r-v1-1/",
+ "http://opensource.org/licenses/LiLiQ-R-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Noweb.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Noweb.json",
+ "referenceNumber": 66,
+ "name": "Noweb License",
+ "licenseId": "Noweb",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Noweb"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC0-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC0-1.0.json",
+ "referenceNumber": 67,
+ "name": "Creative Commons Zero v1.0 Universal",
+ "licenseId": "CC0-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/publicdomain/zero/1.0/legalcode"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-Protection.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-Protection.json",
+ "referenceNumber": 68,
+ "name": "BSD Protection License",
+ "licenseId": "BSD-Protection",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/BSD_Protection_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-2.5.json",
+ "referenceNumber": 69,
+ "name": "Creative Commons Attribution Non Commercial 2.5 Generic",
+ "licenseId": "CC-BY-NC-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zlib.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zlib.json",
+ "referenceNumber": 70,
+ "name": "zlib License",
+ "licenseId": "Zlib",
+ "seeAlso": [
+ "http://www.zlib.net/zlib_license.html",
+ "https://opensource.org/licenses/Zlib"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-invariants-or-later.json",
+ "referenceNumber": 71,
+ "name": "GNU Free Documentation License v1.3 or later - invariants",
+ "licenseId": "GFDL-1.3-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0-AT.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0-AT.json",
+ "referenceNumber": 72,
+ "name": "Creative Commons Attribution 3.0 Austria",
+ "licenseId": "CC-BY-3.0-AT",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/at/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.3c.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.3c.json",
+ "referenceNumber": 73,
+ "name": "LaTeX Project Public License v1.3c",
+ "licenseId": "LPPL-1.3c",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-3c.txt",
+ "https://opensource.org/licenses/LPPL-1.3c"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/EPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EPL-1.0.json",
+ "referenceNumber": 74,
+ "name": "Eclipse Public License 1.0",
+ "licenseId": "EPL-1.0",
+ "seeAlso": [
+ "http://www.eclipse.org/legal/epl-v10.html",
+ "https://opensource.org/licenses/EPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-invariants-or-later.json",
+ "referenceNumber": 75,
+ "name": "GNU Free Documentation License v1.1 or later - invariants",
+ "licenseId": "GFDL-1.1-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ANTLR-PD-fallback.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ANTLR-PD-fallback.json",
+ "referenceNumber": 76,
+ "name": "ANTLR Software Rights Notice with license fallback",
+ "licenseId": "ANTLR-PD-fallback",
+ "seeAlso": [
+ "http://www.antlr2.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.4.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.4.json",
+ "referenceNumber": 77,
+ "name": "Open LDAP Public License v2.4",
+ "licenseId": "OLDAP-2.4",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003dcd1284c4a91a8a380d904eee68d1583f989ed386"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.3.json",
+ "referenceNumber": 78,
+ "name": "Open LDAP Public License v2.3",
+ "licenseId": "OLDAP-2.3",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003dd32cf54a32d581ab475d23c810b0a7fbaf8d63c3"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ZPL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ZPL-2.1.json",
+ "referenceNumber": 79,
+ "name": "Zope Public License 2.1",
+ "licenseId": "ZPL-2.1",
+ "seeAlso": [
+ "http://old.zope.org/Resources/ZPL/"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Apache-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Apache-2.0.json",
+ "referenceNumber": 80,
+ "name": "Apache License 2.0",
+ "licenseId": "Apache-2.0",
+ "seeAlso": [
+ "https://www.apache.org/licenses/LICENSE-2.0",
+ "https://opensource.org/licenses/Apache-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SGI-B-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SGI-B-2.0.json",
+ "referenceNumber": 81,
+ "name": "SGI Free Software License B v2.0",
+ "licenseId": "SGI-B-2.0",
+ "seeAlso": [
+ "http://oss.sgi.com/projects/FreeB/SGIFreeSWLicB.2.0.pdf"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Hippocratic-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Hippocratic-2.1.json",
+ "referenceNumber": 82,
+ "name": "Hippocratic License 2.1",
+ "licenseId": "Hippocratic-2.1",
+ "seeAlso": [
+ "https://firstdonoharm.dev/version/2/1/license.html",
+ "https://github.com/EthicalSource/hippocratic-license/blob/58c0e646d64ff6fbee275bfe2b9492f914e3ab2a/LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-3.0-DE.json",
+ "referenceNumber": 83,
+ "name": "Creative Commons Attribution Share Alike 3.0 Germany",
+ "licenseId": "CC-BY-SA-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-1.0.json",
+ "referenceNumber": 84,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 1.0 Generic",
+ "licenseId": "CC-BY-NC-SA-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.1-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.1-or-later.json",
+ "referenceNumber": 85,
+ "name": "GNU Lesser General Public License v2.1 or later",
+ "licenseId": "LGPL-2.1-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html",
+ "https://opensource.org/licenses/LGPL-2.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0-US.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0-US.json",
+ "referenceNumber": 86,
+ "name": "Creative Commons Attribution 3.0 United States",
+ "licenseId": "CC-BY-3.0-US",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/us/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TCP-wrappers.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TCP-wrappers.json",
+ "referenceNumber": 87,
+ "name": "TCP Wrappers License",
+ "licenseId": "TCP-wrappers",
+ "seeAlso": [
+ "http://rc.quest.com/topics/openssh/license.php#tcpwrappers"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-invariants-or-later.json",
+ "referenceNumber": 88,
+ "name": "GNU Free Documentation License v1.2 or later - invariants",
+ "licenseId": "GFDL-1.2-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Eurosym.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Eurosym.json",
+ "referenceNumber": 89,
+ "name": "Eurosym License",
+ "licenseId": "Eurosym",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Eurosym"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1.json",
+ "referenceNumber": 90,
+ "name": "GNU Free Documentation License v1.1",
+ "licenseId": "GFDL-1.1",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.0.json",
+ "referenceNumber": 91,
+ "name": "LaTeX Project Public License v1.0",
+ "licenseId": "LPPL-1.0",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-0.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.0+.json",
+ "referenceNumber": 92,
+ "name": "GNU Library General Public License v2 or later",
+ "licenseId": "LGPL-2.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SGI-B-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SGI-B-1.0.json",
+ "referenceNumber": 93,
+ "name": "SGI Free Software License B v1.0",
+ "licenseId": "SGI-B-1.0",
+ "seeAlso": [
+ "http://oss.sgi.com/projects/FreeB/SGIFreeSWLicB.1.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/APL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APL-1.0.json",
+ "referenceNumber": 94,
+ "name": "Adaptive Public License 1.0",
+ "licenseId": "APL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/APL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/libtiff.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/libtiff.json",
+ "referenceNumber": 95,
+ "name": "libtiff License",
+ "licenseId": "libtiff",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/libtiff"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-2.1.json",
+ "referenceNumber": 96,
+ "name": "Academic Free License v2.1",
+ "licenseId": "AFL-2.1",
+ "seeAlso": [
+ "http://opensource.linux-mirror.org/licenses/afl-2.1.txt"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-1.0.json",
+ "referenceNumber": 97,
+ "name": "Creative Commons Attribution Non Commercial 1.0 Generic",
+ "licenseId": "CC-BY-NC-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GD.json",
+ "referenceNumber": 98,
+ "name": "GD License",
+ "licenseId": "GD",
+ "seeAlso": [
+ "https://libgd.github.io/manuals/2.3.0/files/license-txt.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-1.1.json",
+ "referenceNumber": 99,
+ "name": "Academic Free License v1.1",
+ "licenseId": "AFL-1.1",
+ "seeAlso": [
+ "http://opensource.linux-mirror.org/licenses/afl-1.1.txt",
+ "http://wayback.archive.org/web/20021004124254/http://www.opensource.org/licenses/academic.php"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-3.0-IGO.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-3.0-IGO.json",
+ "referenceNumber": 100,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 3.0 IGO",
+ "licenseId": "CC-BY-NC-ND-3.0-IGO",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/3.0/igo/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Unicode-DFS-2015.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Unicode-DFS-2015.json",
+ "referenceNumber": 101,
+ "name": "Unicode License Agreement - Data Files and Software (2015)",
+ "licenseId": "Unicode-DFS-2015",
+ "seeAlso": [
+ "https://web.archive.org/web/20151224134844/http://unicode.org/copyright.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-only.json",
+ "referenceNumber": 102,
+ "name": "GNU Free Documentation License v1.2 only",
+ "licenseId": "GFDL-1.2-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MPL-1.1.json",
+ "referenceNumber": 103,
+ "name": "Mozilla Public License 1.1",
+ "licenseId": "MPL-1.1",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/MPL-1.1.html",
+ "https://opensource.org/licenses/MPL-1.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-only.json",
+ "referenceNumber": 104,
+ "name": "GNU General Public License v2.0 only",
+ "licenseId": "GPL-2.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html",
+ "https://opensource.org/licenses/GPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-4.0.json",
+ "referenceNumber": 105,
+ "name": "Creative Commons Attribution Non Commercial 4.0 International",
+ "licenseId": "CC-BY-NC-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/4.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/FreeImage.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FreeImage.json",
+ "referenceNumber": 106,
+ "name": "FreeImage Public License v1.0",
+ "licenseId": "FreeImage",
+ "seeAlso": [
+ "http://freeimage.sourceforge.net/freeimage-license.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SHL-0.51.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SHL-0.51.json",
+ "referenceNumber": 107,
+ "name": "Solderpad Hardware License, Version 0.51",
+ "licenseId": "SHL-0.51",
+ "seeAlso": [
+ "https://solderpad.org/licenses/SHL-0.51/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CNRI-Jython.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CNRI-Jython.json",
+ "referenceNumber": 108,
+ "name": "CNRI Jython License",
+ "licenseId": "CNRI-Jython",
+ "seeAlso": [
+ "http://www.jython.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ZPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ZPL-1.1.json",
+ "referenceNumber": 109,
+ "name": "Zope Public License 1.1",
+ "licenseId": "ZPL-1.1",
+ "seeAlso": [
+ "http://old.zope.org/Resources/License/ZPL-1.1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Afmparse.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Afmparse.json",
+ "referenceNumber": 110,
+ "name": "Afmparse License",
+ "licenseId": "Afmparse",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Afmparse"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.1.json",
+ "referenceNumber": 111,
+ "name": "Open LDAP Public License v2.1",
+ "licenseId": "OLDAP-2.1",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003db0d176738e96a0d3b9f85cb51e140a86f21be715"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Rdisc.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Rdisc.json",
+ "referenceNumber": 112,
+ "name": "Rdisc License",
+ "licenseId": "Rdisc",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Rdisc_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Imlib2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Imlib2.json",
+ "referenceNumber": 113,
+ "name": "Imlib2 License",
+ "licenseId": "Imlib2",
+ "seeAlso": [
+ "http://trac.enlightenment.org/e/browser/trunk/imlib2/COPYING",
+ "https://git.enlightenment.org/legacy/imlib2.git/tree/COPYING"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-4-Clause-Shortened.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-4-Clause-Shortened.json",
+ "referenceNumber": 114,
+ "name": "BSD 4 Clause Shortened",
+ "licenseId": "BSD-4-Clause-Shortened",
+ "seeAlso": [
+ "https://metadata.ftp-master.debian.org/changelogs//main/a/arpwatch/arpwatch_2.1a15-7_copyright"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Sendmail.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Sendmail.json",
+ "referenceNumber": 115,
+ "name": "Sendmail License",
+ "licenseId": "Sendmail",
+ "seeAlso": [
+ "http://www.sendmail.com/pdfs/open_source/sendmail_license.pdf",
+ "https://web.archive.org/web/20160322142305/https://www.sendmail.com/pdfs/open_source/sendmail_license.pdf"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-2.5.json",
+ "referenceNumber": 116,
+ "name": "Creative Commons Attribution 2.5 Generic",
+ "licenseId": "CC-BY-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AAL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AAL.json",
+ "referenceNumber": 117,
+ "name": "Attribution Assurance License",
+ "licenseId": "AAL",
+ "seeAlso": [
+ "https://opensource.org/licenses/attribution"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MPL-2.0-no-copyleft-exception.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MPL-2.0-no-copyleft-exception.json",
+ "referenceNumber": 118,
+ "name": "Mozilla Public License 2.0 (no copyleft exception)",
+ "licenseId": "MPL-2.0-no-copyleft-exception",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/2.0/",
+ "https://opensource.org/licenses/MPL-2.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-2.5.json",
+ "referenceNumber": 119,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 2.5 Generic",
+ "licenseId": "CC-BY-NC-ND-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0-NL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0-NL.json",
+ "referenceNumber": 120,
+ "name": "Creative Commons Attribution 3.0 Netherlands",
+ "licenseId": "CC-BY-3.0-NL",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/nl/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPL-1.02.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPL-1.02.json",
+ "referenceNumber": 121,
+ "name": "Lucent Public License v1.02",
+ "licenseId": "LPL-1.02",
+ "seeAlso": [
+ "http://plan9.bell-labs.com/plan9/license.html",
+ "https://opensource.org/licenses/LPL-1.02"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ECL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ECL-1.0.json",
+ "referenceNumber": 122,
+ "name": "Educational Community License v1.0",
+ "licenseId": "ECL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/ECL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.0-no-RFN.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.0-no-RFN.json",
+ "referenceNumber": 123,
+ "name": "SIL Open Font License 1.0 with no Reserved Font Name",
+ "licenseId": "OFL-1.0-no-RFN",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL10_web"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-3.0-DE.json",
+ "referenceNumber": 124,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 3.0 Germany",
+ "licenseId": "CC-BY-NC-SA-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-3.0.json",
+ "referenceNumber": 125,
+ "name": "Creative Commons Attribution Share Alike 3.0 Unported",
+ "licenseId": "CC-BY-SA-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NTP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NTP.json",
+ "referenceNumber": 126,
+ "name": "NTP License",
+ "licenseId": "NTP",
+ "seeAlso": [
+ "https://opensource.org/licenses/NTP"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MPL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MPL-2.0.json",
+ "referenceNumber": 127,
+ "name": "Mozilla Public License 2.0",
+ "licenseId": "MPL-2.0",
+ "seeAlso": [
+ "https://www.mozilla.org/MPL/2.0/",
+ "https://opensource.org/licenses/MPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/APSL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APSL-1.2.json",
+ "referenceNumber": 128,
+ "name": "Apple Public Source License 1.2",
+ "licenseId": "APSL-1.2",
+ "seeAlso": [
+ "http://www.samurajdata.se/opensource/mirror/licenses/apsl.php"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-no-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-no-invariants-only.json",
+ "referenceNumber": 129,
+ "name": "GNU Free Documentation License v1.2 only - no invariants",
+ "licenseId": "GFDL-1.2-no-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Artistic-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Artistic-2.0.json",
+ "referenceNumber": 130,
+ "name": "Artistic License 2.0",
+ "licenseId": "Artistic-2.0",
+ "seeAlso": [
+ "http://www.perlfoundation.org/artistic_license_2_0",
+ "https://www.perlfoundation.org/artistic-license-20.html",
+ "https://opensource.org/licenses/artistic-license-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0.json",
+ "referenceNumber": 131,
+ "name": "GNU General Public License v2.0 only",
+ "licenseId": "GPL-2.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html",
+ "https://opensource.org/licenses/GPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/RSCPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RSCPL.json",
+ "referenceNumber": 132,
+ "name": "Ricoh Source Code Public License",
+ "licenseId": "RSCPL",
+ "seeAlso": [
+ "http://wayback.archive.org/web/20060715140826/http://www.risource.org/RPL/RPL-1.0A.shtml",
+ "https://opensource.org/licenses/RSCPL"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Sleepycat.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Sleepycat.json",
+ "referenceNumber": 133,
+ "name": "Sleepycat License",
+ "licenseId": "Sleepycat",
+ "seeAlso": [
+ "https://opensource.org/licenses/Sleepycat"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/xpp.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/xpp.json",
+ "referenceNumber": 134,
+ "name": "XPP License",
+ "licenseId": "xpp",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/xpp"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDLA-Sharing-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDLA-Sharing-1.0.json",
+ "referenceNumber": 135,
+ "name": "Community Data License Agreement Sharing 1.0",
+ "licenseId": "CDLA-Sharing-1.0",
+ "seeAlso": [
+ "https://cdla.io/sharing-1-0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ClArtistic.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ClArtistic.json",
+ "referenceNumber": 136,
+ "name": "Clarified Artistic License",
+ "licenseId": "ClArtistic",
+ "seeAlso": [
+ "http://gianluca.dellavedova.org/2011/01/03/clarified-artistic-license/",
+ "http://www.ncftp.com/ncftp/doc/LICENSE.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-1.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-1.0-only.json",
+ "referenceNumber": 137,
+ "name": "Affero General Public License v1.0 only",
+ "licenseId": "AGPL-1.0-only",
+ "seeAlso": [
+ "http://www.affero.org/oagpl.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0-DE.json",
+ "referenceNumber": 138,
+ "name": "Creative Commons Attribution 3.0 Germany",
+ "licenseId": "CC-BY-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-2.0.json",
+ "referenceNumber": 139,
+ "name": "Academic Free License v2.0",
+ "licenseId": "AFL-2.0",
+ "seeAlso": [
+ "http://wayback.archive.org/web/20060924134533/http://www.opensource.org/licenses/afl-2.0.txt"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Intel.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Intel.json",
+ "referenceNumber": 140,
+ "name": "Intel Open Source License",
+ "licenseId": "Intel",
+ "seeAlso": [
+ "https://opensource.org/licenses/Intel"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-no-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-no-invariants-or-later.json",
+ "referenceNumber": 141,
+ "name": "GNU Free Documentation License v1.1 or later - no invariants",
+ "licenseId": "GFDL-1.1-no-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/APAFML.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APAFML.json",
+ "referenceNumber": 142,
+ "name": "Adobe Postscript AFM License",
+ "licenseId": "APAFML",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/AdobePostscriptAFM"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2.json",
+ "referenceNumber": 143,
+ "name": "GNU Free Documentation License v1.2",
+ "licenseId": "GFDL-1.2",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SISSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SISSL.json",
+ "referenceNumber": 144,
+ "name": "Sun Industry Standards Source License v1.1",
+ "licenseId": "SISSL",
+ "seeAlso": [
+ "http://www.openoffice.org/licenses/sissl_license.html",
+ "https://opensource.org/licenses/SISSL"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Naumen.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Naumen.json",
+ "referenceNumber": 145,
+ "name": "Naumen Public License",
+ "licenseId": "Naumen",
+ "seeAlso": [
+ "https://opensource.org/licenses/Naumen"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/HTMLTIDY.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/HTMLTIDY.json",
+ "referenceNumber": 146,
+ "name": "HTML Tidy License",
+ "licenseId": "HTMLTIDY",
+ "seeAlso": [
+ "https://github.com/htacg/tidy-html5/blob/next/README/LICENSE.md"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.8.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.8.json",
+ "referenceNumber": 147,
+ "name": "Open LDAP Public License v2.8",
+ "licenseId": "OLDAP-2.8",
+ "seeAlso": [
+ "http://www.openldap.org/software/release/license.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/blessing.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/blessing.json",
+ "referenceNumber": 148,
+ "name": "SQLite Blessing",
+ "licenseId": "blessing",
+ "seeAlso": [
+ "https://www.sqlite.org/src/artifact/e33a4df7e32d742a?ln\u003d4-9",
+ "https://sqlite.org/src/artifact/df5091916dbb40e6"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-2.0.json",
+ "referenceNumber": 149,
+ "name": "Creative Commons Attribution No Derivatives 2.0 Generic",
+ "licenseId": "CC-BY-ND-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGTSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGTSL.json",
+ "referenceNumber": 150,
+ "name": "Open Group Test Suite License",
+ "licenseId": "OGTSL",
+ "seeAlso": [
+ "http://www.opengroup.org/testing/downloads/The_Open_Group_TSL.txt",
+ "https://opensource.org/licenses/OGTSL"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.0-or-later.json",
+ "referenceNumber": 151,
+ "name": "GNU Library General Public License v2 or later",
+ "licenseId": "LGPL-2.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Parity-7.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Parity-7.0.0.json",
+ "referenceNumber": 152,
+ "name": "The Parity Public License 7.0.0",
+ "licenseId": "Parity-7.0.0",
+ "seeAlso": [
+ "https://paritylicense.com/versions/7.0.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-1.0.json",
+ "referenceNumber": 153,
+ "name": "Creative Commons Attribution No Derivatives 1.0 Generic",
+ "licenseId": "CC-BY-ND-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/dvipdfm.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/dvipdfm.json",
+ "referenceNumber": 154,
+ "name": "dvipdfm License",
+ "licenseId": "dvipdfm",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/dvipdfm"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CNRI-Python.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CNRI-Python.json",
+ "referenceNumber": 155,
+ "name": "CNRI Python License",
+ "licenseId": "CNRI-Python",
+ "seeAlso": [
+ "https://opensource.org/licenses/CNRI-Python"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-4-Clause-UC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-4-Clause-UC.json",
+ "referenceNumber": 156,
+ "name": "BSD-4-Clause (University of California-Specific)",
+ "licenseId": "BSD-4-Clause-UC",
+ "seeAlso": [
+ "http://www.freebsd.org/copyright/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NLOD-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NLOD-1.0.json",
+ "referenceNumber": 157,
+ "name": "Norwegian Licence for Open Government Data (NLOD) 1.0",
+ "licenseId": "NLOD-1.0",
+ "seeAlso": [
+ "http://data.norge.no/nlod/en/1.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MS-RL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MS-RL.json",
+ "referenceNumber": 158,
+ "name": "Microsoft Reciprocal License",
+ "licenseId": "MS-RL",
+ "seeAlso": [
+ "http://www.microsoft.com/opensource/licenses.mspx",
+ "https://opensource.org/licenses/MS-RL"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-4.0.json",
+ "referenceNumber": 159,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 4.0 International",
+ "licenseId": "CC-BY-NC-SA-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/HaskellReport.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/HaskellReport.json",
+ "referenceNumber": 160,
+ "name": "Haskell Language Report License",
+ "licenseId": "HaskellReport",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Haskell_Language_Report_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-1.0.json",
+ "referenceNumber": 161,
+ "name": "Creative Commons Attribution 1.0 Generic",
+ "licenseId": "CC-BY-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/UCL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/UCL-1.0.json",
+ "referenceNumber": 162,
+ "name": "Upstream Compatibility License v1.0",
+ "licenseId": "UCL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/UCL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Mup.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Mup.json",
+ "referenceNumber": 163,
+ "name": "Mup License",
+ "licenseId": "Mup",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Mup"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SMPPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SMPPL.json",
+ "referenceNumber": 164,
+ "name": "Secure Messaging Protocol Public License",
+ "licenseId": "SMPPL",
+ "seeAlso": [
+ "https://github.com/dcblake/SMP/blob/master/Documentation/License.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PHP-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PHP-3.0.json",
+ "referenceNumber": 165,
+ "name": "PHP License v3.0",
+ "licenseId": "PHP-3.0",
+ "seeAlso": [
+ "http://www.php.net/license/3_0.txt",
+ "https://opensource.org/licenses/PHP-3.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GL2PS.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GL2PS.json",
+ "referenceNumber": 166,
+ "name": "GL2PS License",
+ "licenseId": "GL2PS",
+ "seeAlso": [
+ "http://www.geuz.org/gl2ps/COPYING.GL2PS"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CrystalStacker.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CrystalStacker.json",
+ "referenceNumber": 167,
+ "name": "CrystalStacker License",
+ "licenseId": "CrystalStacker",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing:CrystalStacker?rd\u003dLicensing/CrystalStacker"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/W3C-20150513.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/W3C-20150513.json",
+ "referenceNumber": 168,
+ "name": "W3C Software Notice and Document License (2015-05-13)",
+ "licenseId": "W3C-20150513",
+ "seeAlso": [
+ "https://www.w3.org/Consortium/Legal/2015/copyright-software-and-document"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NIST-PD-fallback.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NIST-PD-fallback.json",
+ "referenceNumber": 169,
+ "name": "NIST Public Domain Notice with license fallback",
+ "licenseId": "NIST-PD-fallback",
+ "seeAlso": [
+ "https://github.com/usnistgov/jsip/blob/59700e6926cbe96c5cdae897d9a7d2656b42abe3/LICENSE",
+ "https://github.com/usnistgov/fipy/blob/86aaa5c2ba2c6f1be19593c5986071cf6568cc34/LICENSE.rst"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGL-UK-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGL-UK-1.0.json",
+ "referenceNumber": 170,
+ "name": "Open Government Licence v1.0",
+ "licenseId": "OGL-UK-1.0",
+ "seeAlso": [
+ "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/1/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CPL-1.0.json",
+ "referenceNumber": 171,
+ "name": "Common Public License 1.0",
+ "licenseId": "CPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/CPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.1-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.1-only.json",
+ "referenceNumber": 172,
+ "name": "GNU Lesser General Public License v2.1 only",
+ "licenseId": "LGPL-2.1-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html",
+ "https://opensource.org/licenses/LGPL-2.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ZPL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ZPL-2.0.json",
+ "referenceNumber": 173,
+ "name": "Zope Public License 2.0",
+ "licenseId": "ZPL-2.0",
+ "seeAlso": [
+ "http://old.zope.org/Resources/License/ZPL-2.0",
+ "https://opensource.org/licenses/ZPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Frameworx-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Frameworx-1.0.json",
+ "referenceNumber": 174,
+ "name": "Frameworx Open License 1.0",
+ "licenseId": "Frameworx-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Frameworx-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-3.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-3.0-only.json",
+ "referenceNumber": 175,
+ "name": "GNU Affero General Public License v3.0 only",
+ "licenseId": "AGPL-3.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/agpl.txt",
+ "https://opensource.org/licenses/AGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/DRL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/DRL-1.0.json",
+ "referenceNumber": 176,
+ "name": "Detection Rule License 1.0",
+ "licenseId": "DRL-1.0",
+ "seeAlso": [
+ "https://github.com/Neo23x0/sigma/blob/master/LICENSE.Detection.Rules.md"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EFL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EFL-2.0.json",
+ "referenceNumber": 177,
+ "name": "Eiffel Forum License v2.0",
+ "licenseId": "EFL-2.0",
+ "seeAlso": [
+ "http://www.eiffel-nice.org/license/eiffel-forum-license-2.html",
+ "https://opensource.org/licenses/EFL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Spencer-99.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Spencer-99.json",
+ "referenceNumber": 178,
+ "name": "Spencer License 99",
+ "licenseId": "Spencer-99",
+ "seeAlso": [
+ "http://www.opensource.apple.com/source/tcl/tcl-5/tcl/generic/regfronts.c"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CAL-1.0-Combined-Work-Exception.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CAL-1.0-Combined-Work-Exception.json",
+ "referenceNumber": 179,
+ "name": "Cryptographic Autonomy License 1.0 (Combined Work Exception)",
+ "licenseId": "CAL-1.0-Combined-Work-Exception",
+ "seeAlso": [
+ "http://cryptographicautonomylicense.com/license-text.html",
+ "https://opensource.org/licenses/CAL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-invariants-only.json",
+ "referenceNumber": 180,
+ "name": "GNU Free Documentation License v1.1 only - invariants",
+ "licenseId": "GFDL-1.1-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TCL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TCL.json",
+ "referenceNumber": 181,
+ "name": "TCL/TK License",
+ "licenseId": "TCL",
+ "seeAlso": [
+ "http://www.tcl.tk/software/tcltk/license.html",
+ "https://fedoraproject.org/wiki/Licensing/TCL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SHL-0.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SHL-0.5.json",
+ "referenceNumber": 182,
+ "name": "Solderpad Hardware License v0.5",
+ "licenseId": "SHL-0.5",
+ "seeAlso": [
+ "https://solderpad.org/licenses/SHL-0.5/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.0-RFN.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.0-RFN.json",
+ "referenceNumber": 183,
+ "name": "SIL Open Font License 1.0 with Reserved Font Name",
+ "licenseId": "OFL-1.0-RFN",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL10_web"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.0.json",
+ "referenceNumber": 184,
+ "name": "GNU Library General Public License v2 only",
+ "licenseId": "LGPL-2.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-W-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-W-2.0.json",
+ "referenceNumber": 185,
+ "name": "CERN Open Hardware Licence Version 2 - Weakly Reciprocal",
+ "licenseId": "CERN-OHL-W-2.0",
+ "seeAlso": [
+ "https://www.ohwr.org/project/cernohl/wikis/Documents/CERN-OHL-version-2"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Glide.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Glide.json",
+ "referenceNumber": 186,
+ "name": "3dfx Glide License",
+ "licenseId": "Glide",
+ "seeAlso": [
+ "http://www.users.on.net/~triforce/glidexp/COPYING.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/mpich2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/mpich2.json",
+ "referenceNumber": 187,
+ "name": "mpich2 License",
+ "licenseId": "mpich2",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/psutils.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/psutils.json",
+ "referenceNumber": 188,
+ "name": "psutils License",
+ "licenseId": "psutils",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/psutils"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SPL-1.0.json",
+ "referenceNumber": 189,
+ "name": "Sun Public License v1.0",
+ "licenseId": "SPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/SPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Apache-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Apache-1.1.json",
+ "referenceNumber": 190,
+ "name": "Apache License 1.1",
+ "licenseId": "Apache-1.1",
+ "seeAlso": [
+ "http://apache.org/licenses/LICENSE-1.1",
+ "https://opensource.org/licenses/Apache-1.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-4.0.json",
+ "referenceNumber": 191,
+ "name": "Creative Commons Attribution No Derivatives 4.0 International",
+ "licenseId": "CC-BY-ND-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/4.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/FreeBSD-DOC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FreeBSD-DOC.json",
+ "referenceNumber": 192,
+ "name": "FreeBSD Documentation License",
+ "licenseId": "FreeBSD-DOC",
+ "seeAlso": [
+ "https://www.freebsd.org/copyright/freebsd-doc-license/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SCEA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SCEA.json",
+ "referenceNumber": 193,
+ "name": "SCEA Shared Source License",
+ "licenseId": "SCEA",
+ "seeAlso": [
+ "http://research.scea.com/scea_shared_source_license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Latex2e.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Latex2e.json",
+ "referenceNumber": 194,
+ "name": "Latex2e License",
+ "licenseId": "Latex2e",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Latex2e"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Artistic-1.0-cl8.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Artistic-1.0-cl8.json",
+ "referenceNumber": 195,
+ "name": "Artistic License 1.0 w/clause 8",
+ "licenseId": "Artistic-1.0-cl8",
+ "seeAlso": [
+ "https://opensource.org/licenses/Artistic-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SGI-B-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SGI-B-1.1.json",
+ "referenceNumber": 196,
+ "name": "SGI Free Software License B v1.1",
+ "licenseId": "SGI-B-1.1",
+ "seeAlso": [
+ "http://oss.sgi.com/projects/FreeB/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NRL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NRL.json",
+ "referenceNumber": 197,
+ "name": "NRL License",
+ "licenseId": "NRL",
+ "seeAlso": [
+ "http://web.mit.edu/network/isakmp/nrllicense.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SWL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SWL.json",
+ "referenceNumber": 198,
+ "name": "Scheme Widget Library (SWL) Software License Agreement",
+ "licenseId": "SWL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/SWL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zed.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zed.json",
+ "referenceNumber": 199,
+ "name": "Zed License",
+ "licenseId": "Zed",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Zed"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-1.1.json",
+ "referenceNumber": 200,
+ "name": "CERN Open Hardware Licence v1.1",
+ "licenseId": "CERN-OHL-1.1",
+ "seeAlso": [
+ "https://www.ohwr.org/project/licenses/wikis/cern-ohl-v1.1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/RHeCos-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RHeCos-1.1.json",
+ "referenceNumber": 201,
+ "name": "Red Hat eCos Public License v1.1",
+ "licenseId": "RHeCos-1.1",
+ "seeAlso": [
+ "http://ecos.sourceware.org/old-license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/JasPer-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/JasPer-2.0.json",
+ "referenceNumber": 202,
+ "name": "JasPer License",
+ "licenseId": "JasPer-2.0",
+ "seeAlso": [
+ "http://www.ece.uvic.ca/~mdadams/jasper/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SSPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SSPL-1.0.json",
+ "referenceNumber": 203,
+ "name": "Server Side Public License, v 1",
+ "licenseId": "SSPL-1.0",
+ "seeAlso": [
+ "https://www.mongodb.com/licensing/server-side-public-license"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0+.json",
+ "referenceNumber": 204,
+ "name": "GNU General Public License v2.0 or later",
+ "licenseId": "GPL-2.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html",
+ "https://opensource.org/licenses/GPL-2.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-1.4.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-1.4.json",
+ "referenceNumber": 205,
+ "name": "Open LDAP Public License v1.4",
+ "licenseId": "OLDAP-1.4",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003dc9f95c2f3f2ffb5e0ae55fe7388af75547660941"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/libpng-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/libpng-2.0.json",
+ "referenceNumber": 206,
+ "name": "PNG Reference Library version 2",
+ "licenseId": "libpng-2.0",
+ "seeAlso": [
+ "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CNRI-Python-GPL-Compatible.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CNRI-Python-GPL-Compatible.json",
+ "referenceNumber": 207,
+ "name": "CNRI Python Open Source GPL Compatible License Agreement",
+ "licenseId": "CNRI-Python-GPL-Compatible",
+ "seeAlso": [
+ "http://www.python.org/download/releases/1.6.1/download_win/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Aladdin.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Aladdin.json",
+ "referenceNumber": 208,
+ "name": "Aladdin Free Public License",
+ "licenseId": "Aladdin",
+ "seeAlso": [
+ "http://pages.cs.wisc.edu/~ghost/doc/AFPL/6.01/Public.htm"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-1.0.json",
+ "referenceNumber": 209,
+ "name": "CeCILL Free Software License Agreement v1.0",
+ "licenseId": "CECILL-1.0",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL_V1-fr.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Ruby.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Ruby.json",
+ "referenceNumber": 210,
+ "name": "Ruby License",
+ "licenseId": "Ruby",
+ "seeAlso": [
+ "http://www.ruby-lang.org/en/LICENSE.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NPL-1.1.json",
+ "referenceNumber": 211,
+ "name": "Netscape Public License v1.1",
+ "licenseId": "NPL-1.1",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/NPL/1.1/"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ImageMagick.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ImageMagick.json",
+ "referenceNumber": 212,
+ "name": "ImageMagick License",
+ "licenseId": "ImageMagick",
+ "seeAlso": [
+ "http://www.imagemagick.org/script/license.php"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Cube.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Cube.json",
+ "referenceNumber": 213,
+ "name": "Cube License",
+ "licenseId": "Cube",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Cube"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-only.json",
+ "referenceNumber": 214,
+ "name": "GNU Free Documentation License v1.1 only",
+ "licenseId": "GFDL-1.1-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-2.0.json",
+ "referenceNumber": 215,
+ "name": "Creative Commons Attribution 2.0 Generic",
+ "licenseId": "CC-BY-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-1.2.json",
+ "referenceNumber": 216,
+ "name": "Academic Free License v1.2",
+ "licenseId": "AFL-1.2",
+ "seeAlso": [
+ "http://opensource.linux-mirror.org/licenses/afl-1.2.txt",
+ "http://wayback.archive.org/web/20021204204652/http://www.opensource.org/licenses/academic.php"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-2.0.json",
+ "referenceNumber": 217,
+ "name": "Creative Commons Attribution Share Alike 2.0 Generic",
+ "licenseId": "CC-BY-SA-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-2.0.json",
+ "referenceNumber": 218,
+ "name": "CeCILL Free Software License Agreement v2.0",
+ "licenseId": "CECILL-2.0",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL_V2-en.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-advertising.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-advertising.json",
+ "referenceNumber": 219,
+ "name": "Enlightenment License (e16)",
+ "licenseId": "MIT-advertising",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT_With_Advertising"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-2.5.json",
+ "referenceNumber": 220,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 2.5 Generic",
+ "licenseId": "CC-BY-NC-SA-2.5",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/2.5/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Artistic-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Artistic-1.0.json",
+ "referenceNumber": 221,
+ "name": "Artistic License 1.0",
+ "licenseId": "Artistic-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Artistic-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-3.0.json",
+ "referenceNumber": 222,
+ "name": "Open Software License 3.0",
+ "licenseId": "OSL-3.0",
+ "seeAlso": [
+ "https://web.archive.org/web/20120101081418/http://rosenlaw.com:80/OSL3.0.htm",
+ "https://opensource.org/licenses/OSL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/X11.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/X11.json",
+ "referenceNumber": 223,
+ "name": "X11 License",
+ "licenseId": "X11",
+ "seeAlso": [
+ "http://www.xfree86.org/3.3.6/COPYRIGHT2.html#3"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Bahyph.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Bahyph.json",
+ "referenceNumber": 224,
+ "name": "Bahyph License",
+ "licenseId": "Bahyph",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Bahyph"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.0.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.0.1.json",
+ "referenceNumber": 225,
+ "name": "Open LDAP Public License v2.0.1",
+ "licenseId": "OLDAP-2.0.1",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003db6d68acd14e51ca3aab4428bf26522aa74873f0e"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EUDatagrid.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EUDatagrid.json",
+ "referenceNumber": 226,
+ "name": "EU DataGrid Software License",
+ "licenseId": "EUDatagrid",
+ "seeAlso": [
+ "http://eu-datagrid.web.cern.ch/eu-datagrid/license.html",
+ "https://opensource.org/licenses/EUDatagrid"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MTLL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MTLL.json",
+ "referenceNumber": 227,
+ "name": "Matrix Template Library License",
+ "licenseId": "MTLL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Matrix_Template_Library_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-invariants-only.json",
+ "referenceNumber": 228,
+ "name": "GNU Free Documentation License v1.2 only - invariants",
+ "licenseId": "GFDL-1.2-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-no-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-no-invariants-or-later.json",
+ "referenceNumber": 229,
+ "name": "GNU Free Documentation License v1.3 or later - no invariants",
+ "licenseId": "GFDL-1.3-no-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/curl.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/curl.json",
+ "referenceNumber": 230,
+ "name": "curl License",
+ "licenseId": "curl",
+ "seeAlso": [
+ "https://github.com/bagder/curl/blob/master/COPYING"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LAL-1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LAL-1.3.json",
+ "referenceNumber": 231,
+ "name": "Licence Art Libre 1.3",
+ "licenseId": "LAL-1.3",
+ "seeAlso": [
+ "https://artlibre.org/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/DSDP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/DSDP.json",
+ "referenceNumber": 232,
+ "name": "DSDP License",
+ "licenseId": "DSDP",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/DSDP"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-1.2.json",
+ "referenceNumber": 233,
+ "name": "CERN Open Hardware Licence v1.2",
+ "licenseId": "CERN-OHL-1.2",
+ "seeAlso": [
+ "https://www.ohwr.org/project/licenses/wikis/cern-ohl-v1.2"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TOSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TOSL.json",
+ "referenceNumber": 234,
+ "name": "Trusster Open Source License",
+ "licenseId": "TOSL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/TOSL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0-with-autoconf-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0-with-autoconf-exception.json",
+ "referenceNumber": 235,
+ "name": "GNU General Public License v3.0 w/Autoconf exception",
+ "licenseId": "GPL-3.0-with-autoconf-exception",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/autoconf-exception-3.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-3.0.json",
+ "referenceNumber": 236,
+ "name": "Creative Commons Attribution 3.0 Unported",
+ "licenseId": "CC-BY-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Qhull.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Qhull.json",
+ "referenceNumber": 237,
+ "name": "Qhull License",
+ "licenseId": "Qhull",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Qhull"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-no-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-no-invariants-only.json",
+ "referenceNumber": 238,
+ "name": "GNU Free Documentation License v1.3 only - no invariants",
+ "licenseId": "GFDL-1.3-no-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TORQUE-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TORQUE-1.1.json",
+ "referenceNumber": 239,
+ "name": "TORQUE v2.5+ Software License v1.1",
+ "licenseId": "TORQUE-1.1",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/TORQUEv1.1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MS-PL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MS-PL.json",
+ "referenceNumber": 240,
+ "name": "Microsoft Public License",
+ "licenseId": "MS-PL",
+ "seeAlso": [
+ "http://www.microsoft.com/opensource/licenses.mspx",
+ "https://opensource.org/licenses/MS-PL"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Apache-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Apache-1.0.json",
+ "referenceNumber": 241,
+ "name": "Apache License 1.0",
+ "licenseId": "Apache-1.0",
+ "seeAlso": [
+ "http://www.apache.org/licenses/LICENSE-1.0"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/copyleft-next-0.3.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/copyleft-next-0.3.1.json",
+ "referenceNumber": 242,
+ "name": "copyleft-next 0.3.1",
+ "licenseId": "copyleft-next-0.3.1",
+ "seeAlso": [
+ "https://github.com/copyleft-next/copyleft-next/blob/master/Releases/copyleft-next-0.3.1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-or-later.json",
+ "referenceNumber": 243,
+ "name": "GNU Free Documentation License v1.2 or later",
+ "licenseId": "GFDL-1.2-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0+.json",
+ "referenceNumber": 244,
+ "name": "GNU General Public License v3.0 or later",
+ "licenseId": "GPL-3.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
+ "https://opensource.org/licenses/GPL-3.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MulanPSL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MulanPSL-2.0.json",
+ "referenceNumber": 245,
+ "name": "Mulan Permissive Software License, Version 2",
+ "licenseId": "MulanPSL-2.0",
+ "seeAlso": [
+ "https://license.coscl.org.cn/MulanPSL2/"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/FSFAP.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FSFAP.json",
+ "referenceNumber": 246,
+ "name": "FSF All Permissive License",
+ "licenseId": "FSFAP",
+ "seeAlso": [
+ "https://www.gnu.org/prep/maintain/html_node/License-Notices-for-Other-Files.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Xerox.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Xerox.json",
+ "referenceNumber": 247,
+ "name": "Xerox License",
+ "licenseId": "Xerox",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Xerox"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDDL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDDL-1.0.json",
+ "referenceNumber": 248,
+ "name": "Common Development and Distribution License 1.0",
+ "licenseId": "CDDL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/cddl1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-invariants-only.json",
+ "referenceNumber": 249,
+ "name": "GNU Free Documentation License v1.3 only - invariants",
+ "licenseId": "GFDL-1.3-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/etalab-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/etalab-2.0.json",
+ "referenceNumber": 250,
+ "name": "Etalab Open License 2.0",
+ "licenseId": "etalab-2.0",
+ "seeAlso": [
+ "https://github.com/DISIC/politique-de-contribution-open-source/blob/master/LICENSE.pdf",
+ "https://raw.githubusercontent.com/DISIC/politique-de-contribution-open-source/master/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/XFree86-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/XFree86-1.1.json",
+ "referenceNumber": 251,
+ "name": "XFree86 License 1.1",
+ "licenseId": "XFree86-1.1",
+ "seeAlso": [
+ "http://www.xfree86.org/current/LICENSE4.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SNIA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SNIA.json",
+ "referenceNumber": 252,
+ "name": "SNIA Public License 1.1",
+ "licenseId": "SNIA",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/SNIA_Public_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.1.json",
+ "referenceNumber": 253,
+ "name": "LaTeX Project Public License v1.1",
+ "licenseId": "LPPL-1.1",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CATOSL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CATOSL-1.1.json",
+ "referenceNumber": 254,
+ "name": "Computer Associates Trusted Open Source License 1.1",
+ "licenseId": "CATOSL-1.1",
+ "seeAlso": [
+ "https://opensource.org/licenses/CATOSL-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/TU-Berlin-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TU-Berlin-2.0.json",
+ "referenceNumber": 255,
+ "name": "Technische Universitaet Berlin License 2.0",
+ "licenseId": "TU-Berlin-2.0",
+ "seeAlso": [
+ "https://github.com/CorsixTH/deps/blob/fd339a9f526d1d9c9f01ccf39e438a015da50035/licences/libgsm.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3.json",
+ "referenceNumber": 256,
+ "name": "GNU Free Documentation License v1.3",
+ "licenseId": "GFDL-1.3",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-or-later.json",
+ "referenceNumber": 257,
+ "name": "GNU Free Documentation License v1.3 or later",
+ "licenseId": "GFDL-1.3-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LAL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LAL-1.2.json",
+ "referenceNumber": 258,
+ "name": "Licence Art Libre 1.2",
+ "licenseId": "LAL-1.2",
+ "seeAlso": [
+ "http://artlibre.org/licence/lal/licence-art-libre-12/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ICU.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ICU.json",
+ "referenceNumber": 259,
+ "name": "ICU License",
+ "licenseId": "ICU",
+ "seeAlso": [
+ "http://source.icu-project.org/repos/icu/icu/trunk/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/FTL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FTL.json",
+ "referenceNumber": 260,
+ "name": "Freetype Project License",
+ "licenseId": "FTL",
+ "seeAlso": [
+ "http://freetype.fis.uniroma2.it/FTL.TXT",
+ "http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/docs/FTL.TXT",
+ "http://gitlab.freedesktop.org/freetype/freetype/-/raw/master/docs/FTL.TXT"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MirOS.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MirOS.json",
+ "referenceNumber": 261,
+ "name": "The MirOS Licence",
+ "licenseId": "MirOS",
+ "seeAlso": [
+ "https://opensource.org/licenses/MirOS"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause-NetBSD.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause-NetBSD.json",
+ "referenceNumber": 262,
+ "name": "BSD 2-Clause NetBSD License",
+ "licenseId": "BSD-2-Clause-NetBSD",
+ "seeAlso": [
+ "http://www.netbsd.org/about/redistribution.html#default"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-3.0.json",
+ "referenceNumber": 263,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 3.0 Unported",
+ "licenseId": "CC-BY-NC-ND-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSET-PL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSET-PL-2.1.json",
+ "referenceNumber": 264,
+ "name": "OSET Public License version 2.1",
+ "licenseId": "OSET-PL-2.1",
+ "seeAlso": [
+ "http://www.osetfoundation.org/public-license",
+ "https://opensource.org/licenses/OPL-2.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-2.0.json",
+ "referenceNumber": 265,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 2.0 Generic",
+ "licenseId": "CC-BY-NC-ND-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SISSL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SISSL-1.2.json",
+ "referenceNumber": 266,
+ "name": "Sun Industry Standards Source License v1.2",
+ "licenseId": "SISSL-1.2",
+ "seeAlso": [
+ "http://gridscheduler.sourceforge.net/Gridengine_SISSL_license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Wsuipa.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Wsuipa.json",
+ "referenceNumber": 267,
+ "name": "Wsuipa License",
+ "licenseId": "Wsuipa",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Wsuipa"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zimbra-1.4.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zimbra-1.4.json",
+ "referenceNumber": 268,
+ "name": "Zimbra Public License v1.4",
+ "licenseId": "Zimbra-1.4",
+ "seeAlso": [
+ "http://www.zimbra.com/legal/zimbra-public-license-1-4"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Linux-OpenIB.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Linux-OpenIB.json",
+ "referenceNumber": 269,
+ "name": "Linux Kernel Variant of OpenIB.org license",
+ "licenseId": "Linux-OpenIB",
+ "seeAlso": [
+ "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/infiniband/core/sa.h"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-3.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-3.0.json",
+ "referenceNumber": 270,
+ "name": "GNU Lesser General Public License v3.0 only",
+ "licenseId": "LGPL-3.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/lgpl-3.0-standalone.html",
+ "https://opensource.org/licenses/LGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.5.json",
+ "referenceNumber": 271,
+ "name": "Open LDAP Public License v2.5",
+ "licenseId": "OLDAP-2.5",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d6852b9d90022e8593c98205413380536b1b5a7cf"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AMPAS.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AMPAS.json",
+ "referenceNumber": 272,
+ "name": "Academy of Motion Picture Arts and Sciences BSD",
+ "licenseId": "AMPAS",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/BSD#AMPASBSD"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-1.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-1.0-or-later.json",
+ "referenceNumber": 273,
+ "name": "GNU General Public License v1.0 or later",
+ "licenseId": "GPL-1.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BUSL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BUSL-1.1.json",
+ "referenceNumber": 274,
+ "name": "Business Source License 1.1",
+ "licenseId": "BUSL-1.1",
+ "seeAlso": [
+ "https://mariadb.com/bsl11/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Adobe-Glyph.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Adobe-Glyph.json",
+ "referenceNumber": 275,
+ "name": "Adobe Glyph List License",
+ "licenseId": "Adobe-Glyph",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT#AdobeGlyph"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/0BSD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/0BSD.json",
+ "referenceNumber": 276,
+ "name": "BSD Zero Clause License",
+ "licenseId": "0BSD",
+ "seeAlso": [
+ "http://landley.net/toybox/license.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/W3C-19980720.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/W3C-19980720.json",
+ "referenceNumber": 277,
+ "name": "W3C Software Notice and License (1998-07-20)",
+ "licenseId": "W3C-19980720",
+ "seeAlso": [
+ "http://www.w3.org/Consortium/Legal/copyright-software-19980720.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/FSFUL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FSFUL.json",
+ "referenceNumber": 278,
+ "name": "FSF Unlimited License",
+ "licenseId": "FSFUL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/FSF_Unlimited_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-3.0.json",
+ "referenceNumber": 279,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 3.0 Unported",
+ "licenseId": "CC-BY-NC-SA-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/DOC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/DOC.json",
+ "referenceNumber": 280,
+ "name": "DOC License",
+ "licenseId": "DOC",
+ "seeAlso": [
+ "http://www.cs.wustl.edu/~schmidt/ACE-copying.html",
+ "https://www.dre.vanderbilt.edu/~schmidt/ACE-copying.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/TMate.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TMate.json",
+ "referenceNumber": 281,
+ "name": "TMate Open Source License",
+ "licenseId": "TMate",
+ "seeAlso": [
+ "http://svnkit.com/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-open-group.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-open-group.json",
+ "referenceNumber": 282,
+ "name": "MIT Open Group variant",
+ "licenseId": "MIT-open-group",
+ "seeAlso": [
+ "https://gitlab.freedesktop.org/xorg/app/iceauth/-/blob/master/COPYING",
+ "https://gitlab.freedesktop.org/xorg/app/xvinfo/-/blob/master/COPYING",
+ "https://gitlab.freedesktop.org/xorg/app/xsetroot/-/blob/master/COPYING",
+ "https://gitlab.freedesktop.org/xorg/app/xauth/-/blob/master/COPYING"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AMDPLPA.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AMDPLPA.json",
+ "referenceNumber": 283,
+ "name": "AMD\u0027s plpa_map.c License",
+ "licenseId": "AMDPLPA",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/AMD_plpa_map_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Condor-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Condor-1.1.json",
+ "referenceNumber": 284,
+ "name": "Condor Public License v1.1",
+ "licenseId": "Condor-1.1",
+ "seeAlso": [
+ "http://research.cs.wisc.edu/condor/license.html#condor",
+ "http://web.archive.org/web/20111123062036/http://research.cs.wisc.edu/condor/license.html#condor"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/PolyForm-Noncommercial-1.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PolyForm-Noncommercial-1.0.0.json",
+ "referenceNumber": 285,
+ "name": "PolyForm Noncommercial License 1.0.0",
+ "licenseId": "PolyForm-Noncommercial-1.0.0",
+ "seeAlso": [
+ "https://polyformproject.org/licenses/noncommercial/1.0.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-No-Military-License.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-No-Military-License.json",
+ "referenceNumber": 286,
+ "name": "BSD 3-Clause No Military License",
+ "licenseId": "BSD-3-Clause-No-Military-License",
+ "seeAlso": [
+ "https://gitlab.syncad.com/hive/dhive/-/blob/master/LICENSE",
+ "https://github.com/greymass/swift-eosio/blob/master/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-4.0.json",
+ "referenceNumber": 287,
+ "name": "Creative Commons Attribution 4.0 International",
+ "licenseId": "CC-BY-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by/4.0/legalcode"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGL-Canada-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGL-Canada-2.0.json",
+ "referenceNumber": 288,
+ "name": "Open Government Licence - Canada",
+ "licenseId": "OGL-Canada-2.0",
+ "seeAlso": [
+ "https://open.canada.ca/en/open-government-licence-canada"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-3.0-IGO.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-3.0-IGO.json",
+ "referenceNumber": 289,
+ "name": "Creative Commons Attribution Non Commercial Share Alike 3.0 IGO",
+ "licenseId": "CC-BY-NC-SA-3.0-IGO",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/3.0/igo/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EFL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EFL-1.0.json",
+ "referenceNumber": 290,
+ "name": "Eiffel Forum License v1.0",
+ "licenseId": "EFL-1.0",
+ "seeAlso": [
+ "http://www.eiffel-nice.org/license/forum.txt",
+ "https://opensource.org/licenses/EFL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Newsletr.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Newsletr.json",
+ "referenceNumber": 291,
+ "name": "Newsletr License",
+ "licenseId": "Newsletr",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Newsletr"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/copyleft-next-0.3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/copyleft-next-0.3.0.json",
+ "referenceNumber": 292,
+ "name": "copyleft-next 0.3.0",
+ "licenseId": "copyleft-next-0.3.0",
+ "seeAlso": [
+ "https://github.com/copyleft-next/copyleft-next/blob/master/Releases/copyleft-next-0.3.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0-or-later.json",
+ "referenceNumber": 293,
+ "name": "GNU General Public License v3.0 or later",
+ "licenseId": "GPL-3.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
+ "https://opensource.org/licenses/GPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDLA-Permissive-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDLA-Permissive-2.0.json",
+ "referenceNumber": 294,
+ "name": "Community Data License Agreement Permissive 2.0",
+ "licenseId": "CDLA-Permissive-2.0",
+ "seeAlso": [
+ "https://cdla.dev/permissive-2-0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-ND-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-ND-3.0.json",
+ "referenceNumber": 295,
+ "name": "Creative Commons Attribution No Derivatives 3.0 Unported",
+ "licenseId": "CC-BY-ND-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/C-UDA-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/C-UDA-1.0.json",
+ "referenceNumber": 296,
+ "name": "Computational Use of Data Agreement v1.0",
+ "licenseId": "C-UDA-1.0",
+ "seeAlso": [
+ "https://github.com/microsoft/Computational-Use-of-Data-Agreement/blob/master/C-UDA-1.0.md",
+ "https://cdla.dev/computational-use-of-data-agreement-v1-0/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Barr.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Barr.json",
+ "referenceNumber": 297,
+ "name": "Barr License",
+ "licenseId": "Barr",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Barr"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Vim.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Vim.json",
+ "referenceNumber": 298,
+ "name": "Vim License",
+ "licenseId": "Vim",
+ "seeAlso": [
+ "http://vimdoc.sourceforge.net/htmldoc/uganda.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-classpath-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-classpath-exception.json",
+ "referenceNumber": 299,
+ "name": "GNU General Public License v2.0 w/Classpath exception",
+ "licenseId": "GPL-2.0-with-classpath-exception",
+ "seeAlso": [
+ "https://www.gnu.org/software/classpath/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BitTorrent-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BitTorrent-1.1.json",
+ "referenceNumber": 300,
+ "name": "BitTorrent Open Source License v1.1",
+ "licenseId": "BitTorrent-1.1",
+ "seeAlso": [
+ "http://directory.fsf.org/wiki/License:BitTorrentOSL1.1"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDL-1.0.json",
+ "referenceNumber": 301,
+ "name": "Common Documentation License 1.0",
+ "licenseId": "CDL-1.0",
+ "seeAlso": [
+ "http://www.opensource.apple.com/cdl/",
+ "https://fedoraproject.org/wiki/Licensing/Common_Documentation_License",
+ "https://www.gnu.org/licenses/license-list.html#ACDL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-1.0.json",
+ "referenceNumber": 302,
+ "name": "Creative Commons Attribution Share Alike 1.0 Generic",
+ "licenseId": "CC-BY-SA-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ADSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ADSL.json",
+ "referenceNumber": 303,
+ "name": "Amazon Digital Services License",
+ "licenseId": "ADSL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/AmazonDigitalServicesLicense"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PostgreSQL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PostgreSQL.json",
+ "referenceNumber": 304,
+ "name": "PostgreSQL License",
+ "licenseId": "PostgreSQL",
+ "seeAlso": [
+ "http://www.postgresql.org/about/licence",
+ "https://opensource.org/licenses/PostgreSQL"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.1.json",
+ "referenceNumber": 305,
+ "name": "SIL Open Font License 1.1",
+ "licenseId": "OFL-1.1",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL_web",
+ "https://opensource.org/licenses/OFL-1.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NPL-1.0.json",
+ "referenceNumber": 306,
+ "name": "Netscape Public License v1.0",
+ "licenseId": "NPL-1.0",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/NPL/1.0/"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/xinetd.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/xinetd.json",
+ "referenceNumber": 307,
+ "name": "xinetd License",
+ "licenseId": "xinetd",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Xinetd_License"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.0-only.json",
+ "referenceNumber": 308,
+ "name": "GNU Library General Public License v2 only",
+ "licenseId": "LGPL-2.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.0-standalone.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/zlib-acknowledgement.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/zlib-acknowledgement.json",
+ "referenceNumber": 309,
+ "name": "zlib/libpng License with Acknowledgement",
+ "licenseId": "zlib-acknowledgement",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/ZlibWithAcknowledgement"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.2.1.json",
+ "referenceNumber": 310,
+ "name": "Open LDAP Public License v2.2.1",
+ "licenseId": "OLDAP-2.2.1",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d4bc786f34b50aa301be6f5600f58a980070f481e"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/APSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APSL-1.0.json",
+ "referenceNumber": 311,
+ "name": "Apple Public Source License 1.0",
+ "licenseId": "APSL-1.0",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Apple_Public_Source_License_1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-LBNL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-LBNL.json",
+ "referenceNumber": 312,
+ "name": "Lawrence Berkeley National Labs BSD variant license",
+ "licenseId": "BSD-3-Clause-LBNL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/LBNLBSD"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GLWTPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GLWTPL.json",
+ "referenceNumber": 313,
+ "name": "Good Luck With That Public License",
+ "licenseId": "GLWTPL",
+ "seeAlso": [
+ "https://github.com/me-shaon/GLWTPL/commit/da5f6bc734095efbacb442c0b31e33a65b9d6e85"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-3.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-3.0-only.json",
+ "referenceNumber": 314,
+ "name": "GNU Lesser General Public License v3.0 only",
+ "licenseId": "LGPL-3.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/lgpl-3.0-standalone.html",
+ "https://opensource.org/licenses/LGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGC-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGC-1.0.json",
+ "referenceNumber": 315,
+ "name": "OGC Software License, Version 1.0",
+ "licenseId": "OGC-1.0",
+ "seeAlso": [
+ "https://www.ogc.org/ogc/software/1.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Dotseqn.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Dotseqn.json",
+ "referenceNumber": 316,
+ "name": "Dotseqn License",
+ "licenseId": "Dotseqn",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Dotseqn"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MakeIndex.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MakeIndex.json",
+ "referenceNumber": 317,
+ "name": "MakeIndex License",
+ "licenseId": "MakeIndex",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MakeIndex"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0-only.json",
+ "referenceNumber": 318,
+ "name": "GNU General Public License v3.0 only",
+ "licenseId": "GPL-3.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
+ "https://opensource.org/licenses/GPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-License-2014.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-No-Nuclear-License-2014.json",
+ "referenceNumber": 319,
+ "name": "BSD 3-Clause No Nuclear License 2014",
+ "licenseId": "BSD-3-Clause-No-Nuclear-License-2014",
+ "seeAlso": [
+ "https://java.net/projects/javaeetutorial/pages/BerkeleyLicense"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-1.0-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-1.0-only.json",
+ "referenceNumber": 320,
+ "name": "GNU General Public License v1.0 only",
+ "licenseId": "GPL-1.0-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/IJG.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/IJG.json",
+ "referenceNumber": 321,
+ "name": "Independent JPEG Group License",
+ "licenseId": "IJG",
+ "seeAlso": [
+ "http://dev.w3.org/cvsweb/Amaya/libjpeg/Attic/README?rev\u003d1.2"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-1.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-1.0-or-later.json",
+ "referenceNumber": 322,
+ "name": "Affero General Public License v1.0 or later",
+ "licenseId": "AGPL-1.0-or-later",
+ "seeAlso": [
+ "http://www.affero.org/oagpl.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.1-no-RFN.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.1-no-RFN.json",
+ "referenceNumber": 323,
+ "name": "SIL Open Font License 1.1 with no Reserved Font Name",
+ "licenseId": "OFL-1.1-no-RFN",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL_web",
+ "https://opensource.org/licenses/OFL-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSL-1.0.json",
+ "referenceNumber": 324,
+ "name": "Boost Software License 1.0",
+ "licenseId": "BSL-1.0",
+ "seeAlso": [
+ "http://www.boost.org/LICENSE_1_0.txt",
+ "https://opensource.org/licenses/BSL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Libpng.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Libpng.json",
+ "referenceNumber": 325,
+ "name": "libpng License",
+ "licenseId": "Libpng",
+ "seeAlso": [
+ "http://www.libpng.org/pub/png/src/libpng-LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-3.0.json",
+ "referenceNumber": 326,
+ "name": "Creative Commons Attribution Non Commercial 3.0 Unported",
+ "licenseId": "CC-BY-NC-3.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/3.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-2.0.json",
+ "referenceNumber": 327,
+ "name": "Creative Commons Attribution Non Commercial 2.0 Generic",
+ "licenseId": "CC-BY-NC-2.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/2.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Unlicense.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Unlicense.json",
+ "referenceNumber": 328,
+ "name": "The Unlicense",
+ "licenseId": "Unlicense",
+ "seeAlso": [
+ "https://unlicense.org/"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPL-1.0.json",
+ "referenceNumber": 329,
+ "name": "Lucent Public License Version 1.0",
+ "licenseId": "LPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/LPL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/bzip2-1.0.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/bzip2-1.0.5.json",
+ "referenceNumber": 330,
+ "name": "bzip2 and libbzip2 License v1.0.5",
+ "licenseId": "bzip2-1.0.5",
+ "seeAlso": [
+ "https://sourceware.org/bzip2/1.0.5/bzip2-manual-1.0.5.html",
+ "http://bzip.org/1.0.5/bzip2-manual-1.0.5.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Entessa.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Entessa.json",
+ "referenceNumber": 331,
+ "name": "Entessa Public License v1.0",
+ "licenseId": "Entessa",
+ "seeAlso": [
+ "https://opensource.org/licenses/Entessa"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause-Patent.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause-Patent.json",
+ "referenceNumber": 332,
+ "name": "BSD-2-Clause Plus Patent License",
+ "licenseId": "BSD-2-Clause-Patent",
+ "seeAlso": [
+ "https://opensource.org/licenses/BSDplusPatent"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ECL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ECL-2.0.json",
+ "referenceNumber": 333,
+ "name": "Educational Community License v2.0",
+ "licenseId": "ECL-2.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/ECL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Crossword.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Crossword.json",
+ "referenceNumber": 334,
+ "name": "Crossword License",
+ "licenseId": "Crossword",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Crossword"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-1.0.json",
+ "referenceNumber": 335,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 1.0 Generic",
+ "licenseId": "CC-BY-NC-ND-1.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nd-nc/1.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OCLC-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OCLC-2.0.json",
+ "referenceNumber": 336,
+ "name": "OCLC Research Public License 2.0",
+ "licenseId": "OCLC-2.0",
+ "seeAlso": [
+ "http://www.oclc.org/research/activities/software/license/v2final.htm",
+ "https://opensource.org/licenses/OCLC-2.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-1.1.json",
+ "referenceNumber": 337,
+ "name": "CeCILL Free Software License Agreement v1.1",
+ "licenseId": "CECILL-1.1",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL_V1.1-US.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-2.1.json",
+ "referenceNumber": 338,
+ "name": "CeCILL Free Software License Agreement v2.1",
+ "licenseId": "CECILL-2.1",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL_V2.1-en.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGDL-Taiwan-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGDL-Taiwan-1.0.json",
+ "referenceNumber": 339,
+ "name": "Taiwan Open Government Data License, version 1.0",
+ "licenseId": "OGDL-Taiwan-1.0",
+ "seeAlso": [
+ "https://data.gov.tw/license"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Abstyles.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Abstyles.json",
+ "referenceNumber": 340,
+ "name": "Abstyles License",
+ "licenseId": "Abstyles",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Abstyles"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/libselinux-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/libselinux-1.0.json",
+ "referenceNumber": 341,
+ "name": "libselinux public domain notice",
+ "licenseId": "libselinux-1.0",
+ "seeAlso": [
+ "https://github.com/SELinuxProject/selinux/blob/master/libselinux/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ANTLR-PD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ANTLR-PD.json",
+ "referenceNumber": 342,
+ "name": "ANTLR Software Rights Notice",
+ "licenseId": "ANTLR-PD",
+ "seeAlso": [
+ "http://www.antlr2.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-or-later.json",
+ "referenceNumber": 343,
+ "name": "GNU General Public License v2.0 or later",
+ "licenseId": "GPL-2.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html",
+ "https://opensource.org/licenses/GPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/IPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/IPL-1.0.json",
+ "referenceNumber": 344,
+ "name": "IBM Public License v1.0",
+ "licenseId": "IPL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/IPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-enna.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-enna.json",
+ "referenceNumber": 345,
+ "name": "enna License",
+ "licenseId": "MIT-enna",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT#enna"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CPOL-1.02.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CPOL-1.02.json",
+ "referenceNumber": 346,
+ "name": "Code Project Open License 1.02",
+ "licenseId": "CPOL-1.02",
+ "seeAlso": [
+ "http://www.codeproject.com/info/cpol10.aspx"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-3.0-AT.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-3.0-AT.json",
+ "referenceNumber": 347,
+ "name": "Creative Commons Attribution Share Alike 3.0 Austria",
+ "licenseId": "CC-BY-SA-3.0-AT",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/3.0/at/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0-with-GCC-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0-with-GCC-exception.json",
+ "referenceNumber": 348,
+ "name": "GNU General Public License v3.0 w/GCC Runtime Library exception",
+ "licenseId": "GPL-3.0-with-GCC-exception",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gcc-exception-3.1.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-1-Clause.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-1-Clause.json",
+ "referenceNumber": 349,
+ "name": "BSD 1-Clause License",
+ "licenseId": "BSD-1-Clause",
+ "seeAlso": [
+ "https://svnweb.freebsd.org/base/head/include/ifaddrs.h?revision\u003d326823"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NTP-0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NTP-0.json",
+ "referenceNumber": 350,
+ "name": "NTP No Attribution",
+ "licenseId": "NTP-0",
+ "seeAlso": [
+ "https://github.com/tytso/e2fsprogs/blob/master/lib/et/et_name.c"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SugarCRM-1.1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SugarCRM-1.1.3.json",
+ "referenceNumber": 351,
+ "name": "SugarCRM Public License v1.1.3",
+ "licenseId": "SugarCRM-1.1.3",
+ "seeAlso": [
+ "http://www.sugarcrm.com/crm/SPL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT.json",
+ "referenceNumber": 352,
+ "name": "MIT License",
+ "licenseId": "MIT",
+ "seeAlso": [
+ "https://opensource.org/licenses/MIT"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.1-RFN.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.1-RFN.json",
+ "referenceNumber": 353,
+ "name": "SIL Open Font License 1.1 with Reserved Font Name",
+ "licenseId": "OFL-1.1-RFN",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL_web",
+ "https://opensource.org/licenses/OFL-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Watcom-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Watcom-1.0.json",
+ "referenceNumber": 354,
+ "name": "Sybase Open Watcom Public License 1.0",
+ "licenseId": "Watcom-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Watcom-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-SA-2.0-FR.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-SA-2.0-FR.json",
+ "referenceNumber": 355,
+ "name": "Creative Commons Attribution-NonCommercial-ShareAlike 2.0 France",
+ "licenseId": "CC-BY-NC-SA-2.0-FR",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-sa/2.0/fr/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ODbL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ODbL-1.0.json",
+ "referenceNumber": 356,
+ "name": "Open Data Commons Open Database License v1.0",
+ "licenseId": "ODbL-1.0",
+ "seeAlso": [
+ "http://www.opendatacommons.org/licenses/odbl/1.0/",
+ "https://opendatacommons.org/licenses/odbl/1-0/"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/FSFULLR.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/FSFULLR.json",
+ "referenceNumber": 357,
+ "name": "FSF Unlimited License (with License Retention)",
+ "licenseId": "FSFULLR",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/FSF_Unlimited_License#License_Retention_Variant"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-1.3.json",
+ "referenceNumber": 358,
+ "name": "Open LDAP Public License v1.3",
+ "licenseId": "OLDAP-1.3",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003de5f8117f0ce088d0bd7a8e18ddf37eaa40eb09b1"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SSH-OpenSSH.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SSH-OpenSSH.json",
+ "referenceNumber": 359,
+ "name": "SSH OpenSSH license",
+ "licenseId": "SSH-OpenSSH",
+ "seeAlso": [
+ "https://github.com/openssh/openssh-portable/blob/1b11ea7c58cd5c59838b5fa574cd456d6047b2d4/LICENCE#L10"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause.json",
+ "referenceNumber": 360,
+ "name": "BSD 2-Clause \"Simplified\" License",
+ "licenseId": "BSD-2-Clause",
+ "seeAlso": [
+ "https://opensource.org/licenses/BSD-2-Clause"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/HPND.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/HPND.json",
+ "referenceNumber": 361,
+ "name": "Historical Permission Notice and Disclaimer",
+ "licenseId": "HPND",
+ "seeAlso": [
+ "https://opensource.org/licenses/HPND"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Zimbra-1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Zimbra-1.3.json",
+ "referenceNumber": 362,
+ "name": "Zimbra Public License v1.3",
+ "licenseId": "Zimbra-1.3",
+ "seeAlso": [
+ "http://web.archive.org/web/20100302225219/http://www.zimbra.com/license/zimbra-public-license-1-3.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Borceux.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Borceux.json",
+ "referenceNumber": 363,
+ "name": "Borceux license",
+ "licenseId": "Borceux",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Borceux"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-1.1.json",
+ "referenceNumber": 364,
+ "name": "Open LDAP Public License v1.1",
+ "licenseId": "OLDAP-1.1",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d806557a5ad59804ef3a44d5abfbe91d706b0791f"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OFL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OFL-1.0.json",
+ "referenceNumber": 365,
+ "name": "SIL Open Font License 1.0",
+ "licenseId": "OFL-1.0",
+ "seeAlso": [
+ "http://scripts.sil.org/cms/scripts/page.php?item_id\u003dOFL10_web"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NASA-1.3.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NASA-1.3.json",
+ "referenceNumber": 366,
+ "name": "NASA Open Source Agreement 1.3",
+ "licenseId": "NASA-1.3",
+ "seeAlso": [
+ "http://ti.arc.nasa.gov/opensource/nosa/",
+ "https://opensource.org/licenses/NASA-1.3"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/VOSTROM.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/VOSTROM.json",
+ "referenceNumber": 367,
+ "name": "VOSTROM Public License for Open Source",
+ "licenseId": "VOSTROM",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/VOSTROM"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-0.json",
+ "referenceNumber": 368,
+ "name": "MIT No Attribution",
+ "licenseId": "MIT-0",
+ "seeAlso": [
+ "https://github.com/aws/mit-0",
+ "https://romanrm.net/mit-zero",
+ "https://github.com/awsdocs/aws-cloud9-user-guide/blob/master/LICENSE-SAMPLECODE"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ISC.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ISC.json",
+ "referenceNumber": 369,
+ "name": "ISC License",
+ "licenseId": "ISC",
+ "seeAlso": [
+ "https://www.isc.org/licenses/",
+ "https://www.isc.org/downloads/software-support-policy/isc-license/",
+ "https://opensource.org/licenses/ISC"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Unicode-DFS-2016.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Unicode-DFS-2016.json",
+ "referenceNumber": 370,
+ "name": "Unicode License Agreement - Data Files and Software (2016)",
+ "licenseId": "Unicode-DFS-2016",
+ "seeAlso": [
+ "http://www.unicode.org/copyright.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BlueOak-1.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BlueOak-1.0.0.json",
+ "referenceNumber": 371,
+ "name": "Blue Oak Model License 1.0.0",
+ "licenseId": "BlueOak-1.0.0",
+ "seeAlso": [
+ "https://blueoakcouncil.org/license/1.0.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LiLiQ-Rplus-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LiLiQ-Rplus-1.1.json",
+ "referenceNumber": 372,
+ "name": "Licence Libre du Québec – Réciprocité forte version 1.1",
+ "licenseId": "LiLiQ-Rplus-1.1",
+ "seeAlso": [
+ "https://www.forge.gouv.qc.ca/participez/licence-logicielle/licence-libre-du-quebec-liliq-en-francais/licence-libre-du-quebec-reciprocite-forte-liliq-r-v1-1/",
+ "http://opensource.org/licenses/LiLiQ-Rplus-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NOSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NOSL.json",
+ "referenceNumber": 373,
+ "name": "Netizen Open Source License",
+ "licenseId": "NOSL",
+ "seeAlso": [
+ "http://bits.netizen.com.au/licenses/NOSL/nosl.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SMLNJ.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SMLNJ.json",
+ "referenceNumber": 374,
+ "name": "Standard ML of New Jersey License",
+ "licenseId": "SMLNJ",
+ "seeAlso": [
+ "https://www.smlnj.org/license.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-3.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-3.0+.json",
+ "referenceNumber": 375,
+ "name": "GNU Lesser General Public License v3.0 or later",
+ "licenseId": "LGPL-3.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/lgpl-3.0-standalone.html",
+ "https://opensource.org/licenses/LGPL-3.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CPAL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CPAL-1.0.json",
+ "referenceNumber": 376,
+ "name": "Common Public Attribution License 1.0",
+ "licenseId": "CPAL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/CPAL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/PSF-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PSF-2.0.json",
+ "referenceNumber": 377,
+ "name": "Python Software Foundation License 2.0",
+ "licenseId": "PSF-2.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Python-2.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/RPL-1.5.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RPL-1.5.json",
+ "referenceNumber": 378,
+ "name": "Reciprocal Public License 1.5",
+ "licenseId": "RPL-1.5",
+ "seeAlso": [
+ "https://opensource.org/licenses/RPL-1.5"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause-FreeBSD.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause-FreeBSD.json",
+ "referenceNumber": 379,
+ "name": "BSD 2-Clause FreeBSD License",
+ "licenseId": "BSD-2-Clause-FreeBSD",
+ "seeAlso": [
+ "http://www.freebsd.org/copyright/freebsd-license.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-Modern-Variant.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-Modern-Variant.json",
+ "referenceNumber": 380,
+ "name": "MIT License Modern Variant",
+ "licenseId": "MIT-Modern-Variant",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing:MIT#Modern_Variants",
+ "https://ptolemy.berkeley.edu/copyright.htm",
+ "https://pirlwww.lpl.arizona.edu/resources/guide/software/PerlTk/Tixlic.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Nokia.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Nokia.json",
+ "referenceNumber": 381,
+ "name": "Nokia Open Source License",
+ "licenseId": "Nokia",
+ "seeAlso": [
+ "https://opensource.org/licenses/nokia"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-no-invariants-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-no-invariants-only.json",
+ "referenceNumber": 382,
+ "name": "GNU Free Documentation License v1.1 only - no invariants",
+ "licenseId": "GFDL-1.1-no-invariants-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PDDL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PDDL-1.0.json",
+ "referenceNumber": 383,
+ "name": "Open Data Commons Public Domain Dedication \u0026 License 1.0",
+ "licenseId": "PDDL-1.0",
+ "seeAlso": [
+ "http://opendatacommons.org/licenses/pddl/1.0/",
+ "https://opendatacommons.org/licenses/pddl/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EUPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EUPL-1.0.json",
+ "referenceNumber": 384,
+ "name": "European Union Public License 1.0",
+ "licenseId": "EUPL-1.0",
+ "seeAlso": [
+ "http://ec.europa.eu/idabc/en/document/7330.html",
+ "http://ec.europa.eu/idabc/servlets/Doc027f.pdf?id\u003d31096"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDDL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDDL-1.1.json",
+ "referenceNumber": 385,
+ "name": "Common Development and Distribution License 1.1",
+ "licenseId": "CDDL-1.1",
+ "seeAlso": [
+ "http://glassfish.java.net/public/CDDL+GPL_1_1.html",
+ "https://javaee.github.io/glassfish/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.3-only.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.3-only.json",
+ "referenceNumber": 386,
+ "name": "GNU Free Documentation License v1.3 only",
+ "licenseId": "GFDL-1.3-only",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/fdl-1.3.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.6.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.6.json",
+ "referenceNumber": 387,
+ "name": "Open LDAP Public License v2.6",
+ "licenseId": "OLDAP-2.6",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d1cae062821881f41b73012ba816434897abf4205"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/JSON.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/JSON.json",
+ "referenceNumber": 388,
+ "name": "JSON License",
+ "licenseId": "JSON",
+ "seeAlso": [
+ "http://www.json.org/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-3.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-3.0-or-later.json",
+ "referenceNumber": 389,
+ "name": "GNU Lesser General Public License v3.0 or later",
+ "licenseId": "LGPL-3.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/lgpl-3.0-standalone.html",
+ "https://opensource.org/licenses/LGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-3.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-3.0.json",
+ "referenceNumber": 390,
+ "name": "GNU General Public License v3.0 only",
+ "licenseId": "GPL-3.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-3.0-standalone.html",
+ "https://opensource.org/licenses/GPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Fair.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Fair.json",
+ "referenceNumber": 391,
+ "name": "Fair License",
+ "licenseId": "Fair",
+ "seeAlso": [
+ "http://fairlicense.org/",
+ "https://opensource.org/licenses/Fair"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-font-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-font-exception.json",
+ "referenceNumber": 392,
+ "name": "GNU General Public License v2.0 w/Font exception",
+ "licenseId": "GPL-2.0-with-font-exception",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/gpl-faq.html#FontException"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-2.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-2.1.json",
+ "referenceNumber": 393,
+ "name": "Open Software License 2.1",
+ "licenseId": "OSL-2.1",
+ "seeAlso": [
+ "http://web.archive.org/web/20050212003940/http://www.rosenlaw.com/osl21.htm",
+ "https://opensource.org/licenses/OSL-2.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LPPL-1.3a.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LPPL-1.3a.json",
+ "referenceNumber": 394,
+ "name": "LaTeX Project Public License v1.3a",
+ "licenseId": "LPPL-1.3a",
+ "seeAlso": [
+ "http://www.latex-project.org/lppl/lppl-1-3a.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NAIST-2003.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NAIST-2003.json",
+ "referenceNumber": 395,
+ "name": "Nara Institute of Science and Technology License (2003)",
+ "licenseId": "NAIST-2003",
+ "seeAlso": [
+ "https://enterprise.dejacode.com/licenses/public/naist-2003/#license-text",
+ "https://github.com/nodejs/node/blob/4a19cc8947b1bba2b2d27816ec3d0edf9b28e503/LICENSE#L343"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-4.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-4.0.json",
+ "referenceNumber": 396,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 4.0 International",
+ "licenseId": "CC-BY-NC-ND-4.0",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-3.0-DE.json",
+ "referenceNumber": 397,
+ "name": "Creative Commons Attribution Non Commercial 3.0 Germany",
+ "licenseId": "CC-BY-NC-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.1+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.1+.json",
+ "referenceNumber": 398,
+ "name": "GNU Library General Public License v2.1 or later",
+ "licenseId": "LGPL-2.1+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html",
+ "https://opensource.org/licenses/LGPL-2.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OPL-1.0.json",
+ "referenceNumber": 399,
+ "name": "Open Public License v1.0",
+ "licenseId": "OPL-1.0",
+ "seeAlso": [
+ "http://old.koalateam.com/jackaroo/OPL_1_0.TXT",
+ "https://fedoraproject.org/wiki/Licensing/Open_Public_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/HPND-sell-variant.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/HPND-sell-variant.json",
+ "referenceNumber": 400,
+ "name": "Historical Permission Notice and Disclaimer - sell variant",
+ "licenseId": "HPND-sell-variant",
+ "seeAlso": [
+ "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/sunrpc/auth_gss/gss_generic_token.c?h\u003dv4.19"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/QPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/QPL-1.0.json",
+ "referenceNumber": 401,
+ "name": "Q Public License 1.0",
+ "licenseId": "QPL-1.0",
+ "seeAlso": [
+ "http://doc.qt.nokia.com/3.3/license.html",
+ "https://opensource.org/licenses/QPL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/EUPL-1.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EUPL-1.2.json",
+ "referenceNumber": 402,
+ "name": "European Union Public License 1.2",
+ "licenseId": "EUPL-1.2",
+ "seeAlso": [
+ "https://joinup.ec.europa.eu/page/eupl-text-11-12",
+ "https://joinup.ec.europa.eu/sites/default/files/custom-page/attachment/eupl_v1.2_en.pdf",
+ "https://joinup.ec.europa.eu/sites/default/files/custom-page/attachment/2020-03/EUPL-1.2%20EN.txt",
+ "https://joinup.ec.europa.eu/sites/default/files/inline-files/EUPL%20v1_2%20EN(1).txt",
+ "http://eur-lex.europa.eu/legal-content/EN/TXT/HTML/?uri\u003dCELEX:32017D0863",
+ "https://opensource.org/licenses/EUPL-1.2"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.2-no-invariants-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.2-no-invariants-or-later.json",
+ "referenceNumber": 403,
+ "name": "GNU Free Documentation License v1.2 or later - no invariants",
+ "licenseId": "GFDL-1.2-no-invariants-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/eCos-2.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/eCos-2.0.json",
+ "referenceNumber": 404,
+ "name": "eCos license version 2.0",
+ "licenseId": "eCos-2.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/ecos-license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NCGL-UK-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NCGL-UK-2.0.json",
+ "referenceNumber": 405,
+ "name": "Non-Commercial Government Licence",
+ "licenseId": "NCGL-UK-2.0",
+ "seeAlso": [
+ "http://www.nationalarchives.gov.uk/doc/non-commercial-government-licence/version/2/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Beerware.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Beerware.json",
+ "referenceNumber": 406,
+ "name": "Beerware License",
+ "licenseId": "Beerware",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Beerware",
+ "https://people.freebsd.org/~phk/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-Open-MPI.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-Open-MPI.json",
+ "referenceNumber": 407,
+ "name": "BSD 3-Clause Open MPI variant",
+ "licenseId": "BSD-3-Clause-Open-MPI",
+ "seeAlso": [
+ "https://www.open-mpi.org/community/license.php",
+ "http://www.netlib.org/lapack/LICENSE.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-bison-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-bison-exception.json",
+ "referenceNumber": 408,
+ "name": "GNU General Public License v2.0 w/Bison exception",
+ "licenseId": "GPL-2.0-with-bison-exception",
+ "seeAlso": [
+ "http://git.savannah.gnu.org/cgit/bison.git/tree/data/yacc.c?id\u003d193d7c7054ba7197b0789e14965b739162319b5e#n141"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-B.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-B.json",
+ "referenceNumber": 409,
+ "name": "CeCILL-B Free Software License Agreement",
+ "licenseId": "CECILL-B",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-2.0-with-autoconf-exception.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-2.0-with-autoconf-exception.json",
+ "referenceNumber": 410,
+ "name": "GNU General Public License v2.0 w/Autoconf exception",
+ "licenseId": "GPL-2.0-with-autoconf-exception",
+ "seeAlso": [
+ "http://ac-archive.sourceforge.net/doc/copyright.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EPL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EPL-2.0.json",
+ "referenceNumber": 411,
+ "name": "Eclipse Public License 2.0",
+ "licenseId": "EPL-2.0",
+ "seeAlso": [
+ "https://www.eclipse.org/legal/epl-2.0",
+ "https://www.opensource.org/licenses/EPL-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-feh.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-feh.json",
+ "referenceNumber": 412,
+ "name": "feh License",
+ "licenseId": "MIT-feh",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/MIT#feh"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/RPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RPL-1.1.json",
+ "referenceNumber": 413,
+ "name": "Reciprocal Public License 1.1",
+ "licenseId": "RPL-1.1",
+ "seeAlso": [
+ "https://opensource.org/licenses/RPL-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CDLA-Permissive-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CDLA-Permissive-1.0.json",
+ "referenceNumber": 414,
+ "name": "Community Data License Agreement Permissive 1.0",
+ "licenseId": "CDLA-Permissive-1.0",
+ "seeAlso": [
+ "https://cdla.io/permissive-1-0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Python-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Python-2.0.json",
+ "referenceNumber": 415,
+ "name": "Python License 2.0",
+ "licenseId": "Python-2.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/Python-2.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/MPL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MPL-1.0.json",
+ "referenceNumber": 416,
+ "name": "Mozilla Public License 1.0",
+ "licenseId": "MPL-1.0",
+ "seeAlso": [
+ "http://www.mozilla.org/MPL/MPL-1.0.html",
+ "https://opensource.org/licenses/MPL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/GFDL-1.1-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/GFDL-1.1-or-later.json",
+ "referenceNumber": 417,
+ "name": "GNU Free Documentation License v1.1 or later",
+ "licenseId": "GFDL-1.1-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/fdl-1.1.txt"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/diffmark.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/diffmark.json",
+ "referenceNumber": 418,
+ "name": "diffmark license",
+ "licenseId": "diffmark",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/diffmark"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/GPL-1.0+.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/GPL-1.0+.json",
+ "referenceNumber": 419,
+ "name": "GNU General Public License v1.0 or later",
+ "licenseId": "GPL-1.0+",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/gpl-1.0-standalone.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OpenSSL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OpenSSL.json",
+ "referenceNumber": 420,
+ "name": "OpenSSL License",
+ "licenseId": "OpenSSL",
+ "seeAlso": [
+ "http://www.openssl.org/source/license.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-1.0.json",
+ "referenceNumber": 421,
+ "name": "Open Software License 1.0",
+ "licenseId": "OSL-1.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/OSL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Parity-6.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Parity-6.0.0.json",
+ "referenceNumber": 422,
+ "name": "The Parity Public License 6.0.0",
+ "licenseId": "Parity-6.0.0",
+ "seeAlso": [
+ "https://paritylicense.com/versions/6.0.0.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-1.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-1.0.json",
+ "referenceNumber": 423,
+ "name": "Affero General Public License v1.0",
+ "licenseId": "AGPL-1.0",
+ "seeAlso": [
+ "http://www.affero.org/oagpl.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/YPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/YPL-1.1.json",
+ "referenceNumber": 424,
+ "name": "Yahoo! Public License v1.1",
+ "licenseId": "YPL-1.1",
+ "seeAlso": [
+ "http://www.zimbra.com/license/yahoo_public_license_1.1.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/SSH-short.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SSH-short.json",
+ "referenceNumber": 425,
+ "name": "SSH short notice",
+ "licenseId": "SSH-short",
+ "seeAlso": [
+ "https://github.com/openssh/openssh-portable/blob/1b11ea7c58cd5c59838b5fa574cd456d6047b2d4/pathnames.h",
+ "http://web.mit.edu/kolya/.f/root/athena.mit.edu/sipb.mit.edu/project/openssh/OldFiles/src/openssh-2.9.9p2/ssh-add.1",
+ "https://joinup.ec.europa.eu/svn/lesoll/trunk/italc/lib/src/dsa_key.cpp"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/IBM-pibs.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/IBM-pibs.json",
+ "referenceNumber": 426,
+ "name": "IBM PowerPC Initialization and Boot Software",
+ "licenseId": "IBM-pibs",
+ "seeAlso": [
+ "http://git.denx.de/?p\u003du-boot.git;a\u003dblob;f\u003darch/powerpc/cpu/ppc4xx/miiphy.c;h\u003d297155fdafa064b955e53e9832de93bfb0cfb85b;hb\u003d9fab4bf4cc077c21e43941866f3f2c196f28670d"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Xnet.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Xnet.json",
+ "referenceNumber": 427,
+ "name": "X.Net License",
+ "licenseId": "Xnet",
+ "seeAlso": [
+ "https://opensource.org/licenses/Xnet"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/TU-Berlin-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/TU-Berlin-1.0.json",
+ "referenceNumber": 428,
+ "name": "Technische Universitaet Berlin License 1.0",
+ "licenseId": "TU-Berlin-1.0",
+ "seeAlso": [
+ "https://github.com/swh/ladspa/blob/7bf6f3799fdba70fda297c2d8fd9f526803d9680/gsm/COPYRIGHT"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-3.0.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-3.0.json",
+ "referenceNumber": 429,
+ "name": "GNU Affero General Public License v3.0",
+ "licenseId": "AGPL-3.0",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/agpl.txt",
+ "https://opensource.org/licenses/AGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CAL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CAL-1.0.json",
+ "referenceNumber": 430,
+ "name": "Cryptographic Autonomy License 1.0",
+ "licenseId": "CAL-1.0",
+ "seeAlso": [
+ "http://cryptographicautonomylicense.com/license-text.html",
+ "https://opensource.org/licenses/CAL-1.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/AFL-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AFL-3.0.json",
+ "referenceNumber": 431,
+ "name": "Academic Free License v3.0",
+ "licenseId": "AFL-3.0",
+ "seeAlso": [
+ "http://www.rosenlaw.com/AFL3.0.htm",
+ "https://opensource.org/licenses/afl-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CECILL-C.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CECILL-C.json",
+ "referenceNumber": 432,
+ "name": "CeCILL-C Free Software License Agreement",
+ "licenseId": "CECILL-C",
+ "seeAlso": [
+ "http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGL-UK-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGL-UK-3.0.json",
+ "referenceNumber": 433,
+ "name": "Open Government Licence v3.0",
+ "licenseId": "OGL-UK-3.0",
+ "seeAlso": [
+ "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-Clear.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-Clear.json",
+ "referenceNumber": 434,
+ "name": "BSD 3-Clause Clear License",
+ "licenseId": "BSD-3-Clause-Clear",
+ "seeAlso": [
+ "http://labs.metacarta.com/license-explanation.html#license"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-Modification.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-Modification.json",
+ "referenceNumber": 435,
+ "name": "BSD 3-Clause Modification",
+ "licenseId": "BSD-3-Clause-Modification",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing:BSD#Modification_Variant"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-SA-2.0-UK.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-SA-2.0-UK.json",
+ "referenceNumber": 436,
+ "name": "Creative Commons Attribution Share Alike 2.0 England and Wales",
+ "licenseId": "CC-BY-SA-2.0-UK",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-sa/2.0/uk/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Saxpath.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Saxpath.json",
+ "referenceNumber": 437,
+ "name": "Saxpath License",
+ "licenseId": "Saxpath",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Saxpath_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NLPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NLPL.json",
+ "referenceNumber": 438,
+ "name": "No Limit Public License",
+ "licenseId": "NLPL",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/NLPL"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/SimPL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/SimPL-2.0.json",
+ "referenceNumber": 439,
+ "name": "Simple Public License 2.0",
+ "licenseId": "SimPL-2.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/SimPL-2.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/psfrag.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/psfrag.json",
+ "referenceNumber": 440,
+ "name": "psfrag License",
+ "licenseId": "psfrag",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/psfrag"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Spencer-86.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Spencer-86.json",
+ "referenceNumber": 441,
+ "name": "Spencer License 86",
+ "licenseId": "Spencer-86",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Henry_Spencer_Reg-Ex_Library_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OCCT-PL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OCCT-PL.json",
+ "referenceNumber": 442,
+ "name": "Open CASCADE Technology Public License",
+ "licenseId": "OCCT-PL",
+ "seeAlso": [
+ "http://www.opencascade.com/content/occt-public-license"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/CERN-OHL-S-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CERN-OHL-S-2.0.json",
+ "referenceNumber": 443,
+ "name": "CERN Open Hardware Licence Version 2 - Strongly Reciprocal",
+ "licenseId": "CERN-OHL-S-2.0",
+ "seeAlso": [
+ "https://www.ohwr.org/project/cernohl/wikis/Documents/CERN-OHL-version-2"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/ErlPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ErlPL-1.1.json",
+ "referenceNumber": 444,
+ "name": "Erlang Public License v1.1",
+ "licenseId": "ErlPL-1.1",
+ "seeAlso": [
+ "http://www.erlang.org/EPLICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/MIT-CMU.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/MIT-CMU.json",
+ "referenceNumber": 445,
+ "name": "CMU License",
+ "licenseId": "MIT-CMU",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing:MIT?rd\u003dLicensing/MIT#CMU_Style",
+ "https://github.com/python-pillow/Pillow/blob/fffb426092c8db24a5f4b6df243a8a3c01fb63cd/LICENSE"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/NIST-PD.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NIST-PD.json",
+ "referenceNumber": 446,
+ "name": "NIST Public Domain Notice",
+ "licenseId": "NIST-PD",
+ "seeAlso": [
+ "https://github.com/tcheneau/simpleRPL/blob/e645e69e38dd4e3ccfeceb2db8cba05b7c2e0cd3/LICENSE.txt",
+ "https://github.com/tcheneau/Routing/blob/f09f46fcfe636107f22f2c98348188a65a135d98/README.md"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OSL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OSL-2.0.json",
+ "referenceNumber": 447,
+ "name": "Open Software License 2.0",
+ "licenseId": "OSL-2.0",
+ "seeAlso": [
+ "http://web.archive.org/web/20041020171434/http://www.rosenlaw.com/osl2.0.html"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/APSL-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/APSL-2.0.json",
+ "referenceNumber": 448,
+ "name": "Apple Public Source License 2.0",
+ "licenseId": "APSL-2.0",
+ "seeAlso": [
+ "http://www.opensource.apple.com/license/apsl/"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Leptonica.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Leptonica.json",
+ "referenceNumber": 449,
+ "name": "Leptonica License",
+ "licenseId": "Leptonica",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Leptonica"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/PolyForm-Small-Business-1.0.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/PolyForm-Small-Business-1.0.0.json",
+ "referenceNumber": 450,
+ "name": "PolyForm Small Business License 1.0.0",
+ "licenseId": "PolyForm-Small-Business-1.0.0",
+ "seeAlso": [
+ "https://polyformproject.org/licenses/small-business/1.0.0"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/LiLiQ-P-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/LiLiQ-P-1.1.json",
+ "referenceNumber": 451,
+ "name": "Licence Libre du Québec – Permissive version 1.1",
+ "licenseId": "LiLiQ-P-1.1",
+ "seeAlso": [
+ "https://forge.gouv.qc.ca/licence/fr/liliq-v1-1/",
+ "http://opensource.org/licenses/LiLiQ-P-1.1"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NetCDF.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NetCDF.json",
+ "referenceNumber": 452,
+ "name": "NetCDF license",
+ "licenseId": "NetCDF",
+ "seeAlso": [
+ "http://www.unidata.ucar.edu/software/netcdf/copyright.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/OML.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OML.json",
+ "referenceNumber": 453,
+ "name": "Open Market License",
+ "licenseId": "OML",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/Open_Market_License"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/AGPL-3.0-or-later.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/AGPL-3.0-or-later.json",
+ "referenceNumber": 454,
+ "name": "GNU Affero General Public License v3.0 or later",
+ "licenseId": "AGPL-3.0-or-later",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/agpl.txt",
+ "https://opensource.org/licenses/AGPL-3.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OLDAP-2.2.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OLDAP-2.2.json",
+ "referenceNumber": 455,
+ "name": "Open LDAP Public License v2.2",
+ "licenseId": "OLDAP-2.2",
+ "seeAlso": [
+ "http://www.openldap.org/devel/gitweb.cgi?p\u003dopenldap.git;a\u003dblob;f\u003dLICENSE;hb\u003d470b0c18ec67621c85881b2733057fecf4a1acc3"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause.json",
+ "referenceNumber": 456,
+ "name": "BSD 3-Clause \"New\" or \"Revised\" License",
+ "licenseId": "BSD-3-Clause",
+ "seeAlso": [
+ "https://opensource.org/licenses/BSD-3-Clause"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/WTFPL.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/WTFPL.json",
+ "referenceNumber": 457,
+ "name": "Do What The F*ck You Want To Public License",
+ "licenseId": "WTFPL",
+ "seeAlso": [
+ "http://www.wtfpl.net/about/",
+ "http://sam.zoy.org/wtfpl/COPYING"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/OGL-UK-2.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/OGL-UK-2.0.json",
+ "referenceNumber": 458,
+ "name": "Open Government Licence v2.0",
+ "licenseId": "OGL-UK-2.0",
+ "seeAlso": [
+ "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/2/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-3-Clause-Attribution.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-3-Clause-Attribution.json",
+ "referenceNumber": 459,
+ "name": "BSD with attribution",
+ "licenseId": "BSD-3-Clause-Attribution",
+ "seeAlso": [
+ "https://fedoraproject.org/wiki/Licensing/BSD_with_Attribution"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/RPSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/RPSL-1.0.json",
+ "referenceNumber": 460,
+ "name": "RealNetworks Public Source License v1.0",
+ "licenseId": "RPSL-1.0",
+ "seeAlso": [
+ "https://helixcommunity.org/content/rpsl",
+ "https://opensource.org/licenses/RPSL-1.0"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/CC-BY-NC-ND-3.0-DE.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/CC-BY-NC-ND-3.0-DE.json",
+ "referenceNumber": 461,
+ "name": "Creative Commons Attribution Non Commercial No Derivatives 3.0 Germany",
+ "licenseId": "CC-BY-NC-ND-3.0-DE",
+ "seeAlso": [
+ "https://creativecommons.org/licenses/by-nc-nd/3.0/de/legalcode"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/EUPL-1.1.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/EUPL-1.1.json",
+ "referenceNumber": 462,
+ "name": "European Union Public License 1.1",
+ "licenseId": "EUPL-1.1",
+ "seeAlso": [
+ "https://joinup.ec.europa.eu/software/page/eupl/licence-eupl",
+ "https://joinup.ec.europa.eu/sites/default/files/custom-page/attachment/eupl1.1.-licence-en_0.pdf",
+ "https://opensource.org/licenses/EUPL-1.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/Sendmail-8.23.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Sendmail-8.23.json",
+ "referenceNumber": 463,
+ "name": "Sendmail License 8.23",
+ "licenseId": "Sendmail-8.23",
+ "seeAlso": [
+ "https://www.proofpoint.com/sites/default/files/sendmail-license.pdf",
+ "https://web.archive.org/web/20181003101040/https://www.proofpoint.com/sites/default/files/sendmail-license.pdf"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/ODC-By-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/ODC-By-1.0.json",
+ "referenceNumber": 464,
+ "name": "Open Data Commons Attribution License v1.0",
+ "licenseId": "ODC-By-1.0",
+ "seeAlso": [
+ "https://opendatacommons.org/licenses/by/1.0/"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/D-FSL-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/D-FSL-1.0.json",
+ "referenceNumber": 465,
+ "name": "Deutsche Freie Software Lizenz",
+ "licenseId": "D-FSL-1.0",
+ "seeAlso": [
+ "http://www.dipp.nrw.de/d-fsl/lizenzen/",
+ "http://www.dipp.nrw.de/d-fsl/index_html/lizenzen/de/D-FSL-1_0_de.txt",
+ "http://www.dipp.nrw.de/d-fsl/index_html/lizenzen/en/D-FSL-1_0_en.txt",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl/deutsche-freie-software-lizenz",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl/german-free-software-license",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl/D-FSL-1_0_de.txt/at_download/file",
+ "https://www.hbz-nrw.de/produkte/open-access/lizenzen/dfsl/D-FSL-1_0_en.txt/at_download/file"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-4-Clause.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-4-Clause.json",
+ "referenceNumber": 466,
+ "name": "BSD 4-Clause \"Original\" or \"Old\" License",
+ "licenseId": "BSD-4-Clause",
+ "seeAlso": [
+ "http://directory.fsf.org/wiki/License:BSD_4Clause"
+ ],
+ "isOsiApproved": false,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/LGPL-2.1.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/LGPL-2.1.json",
+ "referenceNumber": 467,
+ "name": "GNU Lesser General Public License v2.1 only",
+ "licenseId": "LGPL-2.1",
+ "seeAlso": [
+ "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html",
+ "https://opensource.org/licenses/LGPL-2.1"
+ ],
+ "isOsiApproved": true,
+ "isFsfLibre": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/BSD-2-Clause-Views.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/BSD-2-Clause-Views.json",
+ "referenceNumber": 468,
+ "name": "BSD 2-Clause with views sentence",
+ "licenseId": "BSD-2-Clause-Views",
+ "seeAlso": [
+ "http://www.freebsd.org/copyright/freebsd-license.html",
+ "https://people.freebsd.org/~ivoras/wine/patch-wine-nvidia.sh",
+ "https://github.com/protegeproject/protege/blob/master/license.txt"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Artistic-1.0-Perl.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Artistic-1.0-Perl.json",
+ "referenceNumber": 469,
+ "name": "Artistic License 1.0 (Perl)",
+ "licenseId": "Artistic-1.0-Perl",
+ "seeAlso": [
+ "http://dev.perl.org/licenses/artistic.html"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/NPOSL-3.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/NPOSL-3.0.json",
+ "referenceNumber": 470,
+ "name": "Non-Profit Open Software License 3.0",
+ "licenseId": "NPOSL-3.0",
+ "seeAlso": [
+ "https://opensource.org/licenses/NOSL3.0"
+ ],
+ "isOsiApproved": true
+ },
+ {
+ "reference": "https://spdx.org/licenses/gSOAP-1.3b.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/gSOAP-1.3b.json",
+ "referenceNumber": 471,
+ "name": "gSOAP Public License v1.3b",
+ "licenseId": "gSOAP-1.3b",
+ "seeAlso": [
+ "http://www.cs.fsu.edu/~engelen/license.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/Interbase-1.0.html",
+ "isDeprecatedLicenseId": false,
+ "detailsUrl": "https://spdx.org/licenses/Interbase-1.0.json",
+ "referenceNumber": 472,
+ "name": "Interbase Public License v1.0",
+ "licenseId": "Interbase-1.0",
+ "seeAlso": [
+ "https://web.archive.org/web/20060319014854/http://info.borland.com/devsupport/interbase/opensource/IPL.html"
+ ],
+ "isOsiApproved": false
+ },
+ {
+ "reference": "https://spdx.org/licenses/StandardML-NJ.html",
+ "isDeprecatedLicenseId": true,
+ "detailsUrl": "https://spdx.org/licenses/StandardML-NJ.json",
+ "referenceNumber": 473,
+ "name": "Standard ML of New Jersey License",
+ "licenseId": "StandardML-NJ",
+ "seeAlso": [
+ "http://www.smlnj.org//license.html"
+ ],
+ "isOsiApproved": false
+ }
+ ],
+ "releaseDate": "2021-08-08"
+} \ No newline at end of file
diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py
index 1d3c775bbe..ed4af18ced 100644
--- a/meta/lib/oe/cve_check.py
+++ b/meta/lib/oe/cve_check.py
@@ -75,7 +75,138 @@ def cve_check_merge_jsons(output, data):
for product in output["package"]:
if product["name"] == data["package"][0]["name"]:
- bb.error("Error adding the same package twice")
+ bb.error("Error adding the same package %s twice" % product["name"])
return
output["package"].append(data["package"][0])
+
+def update_symlinks(target_path, link_path):
+ """
+ Update a symbolic link link_path to point to target_path.
+ Remove and recreate the link if it already exists and differs.
+ """
+ if link_path != target_path and os.path.exists(target_path):
+ if os.path.exists(os.path.realpath(link_path)):
+ os.remove(link_path)
+ os.symlink(os.path.basename(target_path), link_path)
+
+def get_patched_cves(d):
+ """
+ Get patches that solve CVEs using the "CVE: " tag.
+ """
+
+ import re
+ import oe.patch
+
+ pn = d.getVar("PN")
+ cve_match = re.compile(r"CVE:( CVE\-\d{4}\-\d+)+")
+
+ # Matches the last "CVE-YYYY-ID" in the file name, also if written
+ # in lowercase. A file name may contain multiple CVE IDs, but only
+ # the last one is detected from the file name. However, patch file
+ # contents addressing multiple CVE IDs are supported
+ # (cve_match regular expression)
+
+ cve_file_name_match = re.compile(r".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
+
+ patched_cves = set()
+ bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
+ for url in oe.patch.src_patches(d):
+ patch_file = bb.fetch.decodeurl(url)[2]
+
+ # Check patch file name for CVE ID
+ fname_match = cve_file_name_match.search(patch_file)
+ if fname_match:
+ cve = fname_match.group(1).upper()
+ patched_cves.add(cve)
+ bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
+
+ # Remote patches won't be present and compressed patches won't be
+ # unpacked, so say we're not scanning them
+ if not os.path.isfile(patch_file):
+ bb.note("%s is remote or compressed, not scanning content" % patch_file)
+ continue
+
+ with open(patch_file, "r", encoding="utf-8") as f:
+ try:
+ patch_text = f.read()
+ except UnicodeDecodeError:
+ bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
+ " trying with iso8859-1" % patch_file)
+ f.close()
+ with open(patch_file, "r", encoding="iso8859-1") as f:
+ patch_text = f.read()
+
+ # Search for one or more "CVE: " lines
+ text_match = False
+ for match in cve_match.finditer(patch_text):
+ # Get only the CVEs without the "CVE: " tag
+ cves = patch_text[match.start()+5:match.end()]
+ for cve in cves.split():
+ bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
+ patched_cves.add(cve)
+ text_match = True
+
+ if not fname_match and not text_match:
+ bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+
+ return patched_cves
+
+
+def get_cpe_ids(cve_product, version):
+ """
+ Get list of CPE identifiers for the given product and version
+ """
+
+ version = version.split("+git")[0]
+
+ cpe_ids = []
+ for product in cve_product.split():
+ # CVE_PRODUCT in recipes may include vendor information for CPE identifiers. If not,
+ # use wildcard for vendor.
+ if ":" in product:
+ vendor, product = product.split(":", 1)
+ else:
+ vendor = "*"
+
+ cpe_id = 'cpe:2.3:a:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, product, version)
+ cpe_ids.append(cpe_id)
+
+ return cpe_ids
+
+def convert_cve_version(version):
+ """
+ This function converts from CVE format to Yocto version format.
+ e.g. 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1
+
+ Unless it is redefined using CVE_VERSION in the recipe,
+ cve_check uses the version in the name of the recipe (${PV})
+ to check vulnerabilities against a CVE in the database downloaded from NVD.
+
+ When the version has an update, i.e.
+ "p1" in OpenSSH 8.3p1,
+ "-rc1" in linux kernel 6.2-rc1,
+ the database stores the version as version_update (8.3_p1, 6.2_rc1).
+ Therefore, we must transform this version before comparing to the
+ recipe version.
+
+ In this case, the parameter of the function is 8.3_p1.
+ If the version uses the Release Candidate format, "rc",
+ this function replaces the '_' with '-'.
+ If the version uses the Update format, "p",
+ this function removes the '_' completely.
+ """
+ import re
+
+ matches = re.match('^([0-9.]+)_((p|rc)[0-9]+)$', version)
+
+ if not matches:
+ return version
+
+ version = matches.group(1)
+ update = matches.group(2)
+
+ if matches.group(3) == "rc":
+ return version + '-' + update
+
+ return version + update
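As a quick illustration of the version and CPE handling added above, here is a minimal, hypothetical sketch of how convert_cve_version() and get_cpe_ids() behave. It assumes oe.cve_check is importable (for example from a devpyshell); the product and version values are made up.

# Minimal usage sketch; assumes oe.cve_check is importable (e.g. from a
# devpyshell) and uses made-up product/version values.
from oe.cve_check import convert_cve_version, get_cpe_ids

# NVD stores "update" style versions as version_update; recipes use the
# upstream spelling, so the CVE version is converted before comparison.
assert convert_cve_version("8.3_p1") == "8.3p1"    # update ("p") suffix: "_" dropped
assert convert_cve_version("6.2_rc1") == "6.2-rc1" # release candidate: "_" becomes "-"
assert convert_cve_version("1.2.3") == "1.2.3"     # no suffix: returned unchanged

# CVE_PRODUCT entries may include a vendor as "vendor:product"; without one,
# a wildcard vendor is used. A "+git..." suffix in the version is stripped.
print(get_cpe_ids("openbsd:openssh", "8.3p1"))
# ['cpe:2.3:a:openbsd:openssh:8.3p1:*:*:*:*:*:*:*']
print(get_cpe_ids("openssh", "8.3p1+gitAUTOINC"))
# ['cpe:2.3:a:*:openssh:8.3p1:*:*:*:*:*:*:*']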
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
index db988d9247..502dfbe3ed 100644
--- a/meta/lib/oe/package_manager.py
+++ b/meta/lib/oe/package_manager.py
@@ -611,12 +611,13 @@ class PackageManager(object, metaclass=ABCMeta):
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- target_arch = self.d.getVar('TARGET_ARCH')
- localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
- if os.path.exists(localedir) and os.listdir(localedir):
- generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
- # And now delete the binary locales
- self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
+ if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
+ target_arch = self.d.getVar('TARGET_ARCH')
+ localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
+ if os.path.exists(localedir) and os.listdir(localedir):
+ generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
def deploy_dir_lock(self):
if self.deploy_dir is None:
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index a82085a792..feb834c0e3 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -57,6 +57,17 @@ def read_subpkgdata_dict(pkg, d):
ret[newvar] = subd[var]
return ret
+def read_subpkgdata_extended(pkg, d):
+ import json
+ import gzip
+
+ fn = d.expand("${PKGDATA_DIR}/extended/%s.json.gz" % pkg)
+ try:
+ with gzip.open(fn, "rt", encoding="utf-8") as f:
+ return json.load(f)
+ except FileNotFoundError:
+ return None
+
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index 7cd8436da5..feb6ee7082 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0-only
#
+import os
+import shlex
+import subprocess
import oe.path
import oe.types
@@ -24,7 +27,6 @@ class CmdError(bb.BBHandledException):
def runcmd(args, dir = None):
- import pipes
import subprocess
if dir:
@@ -35,7 +37,7 @@ def runcmd(args, dir = None):
# print("cwd: %s -> %s" % (olddir, dir))
try:
- args = [ pipes.quote(str(arg)) for arg in args ]
+ args = [ shlex.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
(exitstatus, output) = subprocess.getstatusoutput(cmd)
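The pipes module is deprecated (and removed in Python 3.13), so runcmd() now quotes its arguments with shlex.quote(), which produces the same shell-safe quoting. A short sketch of the behavior being relied on, with made-up arguments:

# pipes.quote() and shlex.quote() produce the same shell-safe quoting, and
# pipes is deprecated (removed in Python 3.13), so shlex is the safer choice.
import shlex

args = ["patch", "-p1", "--dry-run", "-i", "fix CVE-2021-1234.patch"]
cmd = " ".join(shlex.quote(str(a)) for a in args)
print(cmd)
# patch -p1 --dry-run -i 'fix CVE-2021-1234.patch'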
diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py
index 0938e4cb39..1ed79b18ca 100644
--- a/meta/lib/oe/reproducible.py
+++ b/meta/lib/oe/reproducible.py
@@ -62,7 +62,8 @@ def get_source_date_epoch_from_git(d, sourcedir):
return None
bb.debug(1, "git repository: %s" % gitpath)
- p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE)
+ p = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
+ check=True, stdout=subprocess.PIPE)
return int(p.stdout.decode('utf-8'))
def get_source_date_epoch_from_youngest_file(d, sourcedir):
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index 9e9f7f1f08..5391c25af9 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -321,7 +321,9 @@ class Rootfs(object, metaclass=ABCMeta):
if not os.path.exists(kernel_abi_ver_file):
bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
- kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
+ with open(kernel_abi_ver_file) as f:
+ kernel_ver = f.read().strip(' \n')
+
versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
bb.utils.mkdirhier(versioned_modules_dir)
diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py
new file mode 100644
index 0000000000..22ed5070ea
--- /dev/null
+++ b/meta/lib/oe/sbom.py
@@ -0,0 +1,84 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import collections
+import os
+
+DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
+DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
+
+
+def get_recipe_spdxid(d):
+ return "SPDXRef-%s-%s" % ("Recipe", d.getVar("PN"))
+
+
+def get_download_spdxid(d, idx):
+ return "SPDXRef-Download-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_package_spdxid(pkg):
+ return "SPDXRef-Package-%s" % pkg
+
+
+def get_source_file_spdxid(d, idx):
+ return "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_packaged_file_spdxid(pkg, idx):
+ return "SPDXRef-PackagedFile-%s-%d" % (pkg, idx)
+
+
+def get_image_spdxid(img):
+ return "SPDXRef-Image-%s" % img
+
+
+def get_sdk_spdxid(sdk):
+ return "SPDXRef-SDK-%s" % sdk
+
+
+def write_doc(d, spdx_doc, subdir, spdx_deploy=None, indent=None):
+ from pathlib import Path
+
+ if spdx_deploy is None:
+ spdx_deploy = Path(d.getVar("SPDXDEPLOY"))
+
+ dest = spdx_deploy / subdir / (spdx_doc.name + ".spdx.json")
+ dest.parent.mkdir(exist_ok=True, parents=True)
+ with dest.open("wb") as f:
+ doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)
+
+ l = spdx_deploy / "by-namespace" / spdx_doc.documentNamespace.replace("/", "_")
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ return doc_sha1
+
+
+def read_doc(fn):
+ import hashlib
+ import oe.spdx
+ import io
+ import contextlib
+
+ @contextlib.contextmanager
+ def get_file():
+ if isinstance(fn, io.IOBase):
+ yield fn
+ else:
+ with fn.open("rb") as f:
+ yield f
+
+ with get_file() as f:
+ sha1 = hashlib.sha1()
+ while True:
+ chunk = f.read(4096)
+ if not chunk:
+ break
+ sha1.update(chunk)
+
+ f.seek(0)
+ doc = oe.spdx.SPDXDocument.from_json(f)
+
+ return (doc, sha1.hexdigest())
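A minimal, hypothetical sketch of how the sbom.py helpers above could be used together; the deploy path and document name are made up, and "d" is a task datastore (only consulted when spdx_deploy is omitted).

# Hypothetical round trip with the helpers above; paths and names are
# illustrative only.
from pathlib import Path
import oe.sbom
import oe.spdx

doc = oe.spdx.SPDXDocument(name="recipe-zlib")
doc.documentNamespace = "http://spdx.org/spdxdocs/example-0000"

# write_doc() serializes the document, creates a "by-namespace" symlink and
# returns the SHA1 of the JSON it wrote.
sha1 = oe.sbom.write_doc(d, doc, "recipes", spdx_deploy=Path("/tmp/spdx"))

# read_doc() accepts a path (or an already open binary file) and returns the
# parsed document together with its SHA1.
doc2, sha1_again = oe.sbom.read_doc(Path("/tmp/spdx/recipes/recipe-zlib.spdx.json"))
assert sha1 == sha1_again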
diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py
new file mode 100644
index 0000000000..7aaf2af5ed
--- /dev/null
+++ b/meta/lib/oe/spdx.py
@@ -0,0 +1,357 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# This library is intended to capture the JSON SPDX specification in a
+# type-safe manner. It is not intended to encode any particular OE-specific
+# behavior; see sbom.py for that.
+#
+# The SPDX specification document doesn't cover the JSON syntax for every
+# particular construct, which can make it hard to determine what the JSON
+# syntax should be. I've found it is actually much simpler to read the official
+# SPDX JSON schema, which can be found in the https://github.com/spdx/spdx-spec
+# repository under schemas/spdx-schema.json
+#
+
+import hashlib
+import itertools
+import json
+
+SPDX_VERSION = "2.2"
+
+
+#
+# The following are the support classes that are used to implement SPDX objects
+#
+
+class _Property(object):
+ """
+ A generic SPDX object property. The different types will derive from this
+ class
+ """
+
+ def __init__(self, *, default=None):
+ self.default = default
+
+ def setdefault(self, dest, name):
+ if self.default is not None:
+ dest.setdefault(name, self.default)
+
+
+class _String(_Property):
+ """
+ A scalar string property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return source
+
+
+class _Object(_Property):
+ """
+ A scalar property of an SPDX object whose value is itself an SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(**kwargs)
+ self.cls = cls
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = self.cls()
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper)
+
+ def init(self, source):
+ return self.cls(**source)
+
+
+class _ListProperty(_Property):
+ """
+ A list of SPDX properties
+ """
+
+ def __init__(self, prop, **kwargs):
+ super().__init__(**kwargs)
+ self.prop = prop
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = []
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = list(value)
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return [self.prop.init(o) for o in source]
+
+
+class _StringList(_ListProperty):
+ """
+ A list of strings as a property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(_String(), **kwargs)
+
+
+class _ObjectList(_ListProperty):
+ """
+ A list of SPDX objects as a property for an SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(_Object(cls), **kwargs)
+
+
+class MetaSPDXObject(type):
+ """
+ A metaclass that allows properties (anything derived from a _Property
+ class) to be defined for a SPDX object
+ """
+ def __new__(mcls, name, bases, attrs):
+ attrs["_properties"] = {}
+
+ for key in attrs.keys():
+ if isinstance(attrs[key], _Property):
+ prop = attrs[key]
+ attrs["_properties"][key] = prop
+ prop.set_property(attrs, key)
+
+ return super().__new__(mcls, name, bases, attrs)
+
+
+class SPDXObject(metaclass=MetaSPDXObject):
+ """
+ The base SPDX object; all SPDX spec classes must derive from this class
+ """
+ def __init__(self, **d):
+ self._spdx = {}
+
+ for name, prop in self._properties.items():
+ prop.setdefault(self._spdx, name)
+ if name in d:
+ self._spdx[name] = prop.init(d[name])
+
+ def serializer(self):
+ return self._spdx
+
+ def __setattr__(self, name, value):
+ if name in self._properties or name == "_spdx":
+ super().__setattr__(name, value)
+ return
+ raise KeyError("%r is not a valid SPDX property" % name)
+
+#
+# These are the SPDX objects implemented from the spec. The *only* properties
+# that can be added to these objects are ones directly specified in the SPDX
+# spec, however you may add helper functions to make operations easier.
+#
+# Defaults should *only* be specified if the SPDX spec says there is a certain
+# required value for a field (e.g. dataLicense), or if the field is mandatory
+# and has a sane "this field is unknown" value (e.g. "NOASSERTION")
+#
+
+class SPDXAnnotation(SPDXObject):
+ annotationDate = _String()
+ annotationType = _String()
+ annotator = _String()
+ comment = _String()
+
+class SPDXChecksum(SPDXObject):
+ algorithm = _String()
+ checksumValue = _String()
+
+
+class SPDXRelationship(SPDXObject):
+ spdxElementId = _String()
+ relatedSpdxElement = _String()
+ relationshipType = _String()
+ comment = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+
+
+class SPDXExternalReference(SPDXObject):
+ referenceCategory = _String()
+ referenceType = _String()
+ referenceLocator = _String()
+
+
+class SPDXPackageVerificationCode(SPDXObject):
+ packageVerificationCodeValue = _String()
+ packageVerificationCodeExcludedFiles = _StringList()
+
+
+class SPDXPackage(SPDXObject):
+ ALLOWED_CHECKSUMS = [
+ "SHA1",
+ "SHA224",
+ "SHA256",
+ "SHA384",
+ "SHA512",
+ "MD2",
+ "MD4",
+ "MD5",
+ "MD6",
+ ]
+
+ name = _String()
+ SPDXID = _String()
+ versionInfo = _String()
+ downloadLocation = _String(default="NOASSERTION")
+ supplier = _String(default="NOASSERTION")
+ homepage = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ licenseDeclared = _String(default="NOASSERTION")
+ summary = _String()
+ description = _String()
+ sourceInfo = _String()
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
+ externalRefs = _ObjectList(SPDXExternalReference)
+ packageVerificationCode = _Object(SPDXPackageVerificationCode)
+ hasFiles = _StringList()
+ packageFileName = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+ checksums = _ObjectList(SPDXChecksum)
+
+
+class SPDXFile(SPDXObject):
+ SPDXID = _String()
+ fileName = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoInFiles = _StringList(default=["NOASSERTION"])
+ checksums = _ObjectList(SPDXChecksum)
+ fileTypes = _StringList()
+
+
+class SPDXCreationInfo(SPDXObject):
+ created = _String()
+ licenseListVersion = _String()
+ comment = _String()
+ creators = _StringList()
+
+
+class SPDXExternalDocumentRef(SPDXObject):
+ externalDocumentId = _String()
+ spdxDocument = _String()
+ checksum = _Object(SPDXChecksum)
+
+
+class SPDXExtractedLicensingInfo(SPDXObject):
+ name = _String()
+ comment = _String()
+ licenseId = _String()
+ extractedText = _String()
+
+
+class SPDXDocument(SPDXObject):
+ spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
+ dataLicense = _String(default="CC0-1.0")
+ SPDXID = _String(default="SPDXRef-DOCUMENT")
+ name = _String()
+ documentNamespace = _String()
+ creationInfo = _Object(SPDXCreationInfo)
+ packages = _ObjectList(SPDXPackage)
+ files = _ObjectList(SPDXFile)
+ relationships = _ObjectList(SPDXRelationship)
+ externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
+ hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)
+
+ def __init__(self, **d):
+ super().__init__(**d)
+
+ def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
+ class Encoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, SPDXObject):
+ return o.serializer()
+
+ return super().default(o)
+
+ sha1 = hashlib.sha1()
+ for chunk in Encoder(
+ sort_keys=sort_keys,
+ indent=indent,
+ separators=separators,
+ ).iterencode(self):
+ chunk = chunk.encode("utf-8")
+ f.write(chunk)
+ sha1.update(chunk)
+
+ return sha1.hexdigest()
+
+ @classmethod
+ def from_json(cls, f):
+ return cls(**json.load(f))
+
+ def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
+ if isinstance(_from, SPDXObject):
+ from_spdxid = _from.SPDXID
+ else:
+ from_spdxid = _from
+
+ if isinstance(_to, SPDXObject):
+ to_spdxid = _to.SPDXID
+ else:
+ to_spdxid = _to
+
+ r = SPDXRelationship(
+ spdxElementId=from_spdxid,
+ relatedSpdxElement=to_spdxid,
+ relationshipType=relationship,
+ )
+
+ if comment is not None:
+ r.comment = comment
+
+ if annotation is not None:
+ r.annotations.append(annotation)
+
+ self.relationships.append(r)
+
+ def find_by_spdxid(self, spdxid):
+ for o in itertools.chain(self.packages, self.files):
+ if o.SPDXID == spdxid:
+ return o
+ return None
+
+ def find_external_document_ref(self, namespace):
+ for r in self.externalDocumentRefs:
+ if r.spdxDocument == namespace:
+ return r
+ return None
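To show the property model defined in this file, here is a small hypothetical example of building and serializing a document; the names and values are illustrative only.

# Small, hypothetical example of the property model above: only declared SPDX
# properties can be set, spec-mandated defaults are filled in automatically,
# and to_json() returns the SHA1 of the serialized output.
import io
import oe.spdx

doc = oe.spdx.SPDXDocument(name="example-doc")
doc.creationInfo.created = "2021-08-08T00:00:00Z"
doc.creationInfo.creators.append("Tool: example-sketch")

pkg = oe.spdx.SPDXPackage(name="zlib", SPDXID="SPDXRef-Package-zlib")
pkg.versionInfo = "1.2.11"
doc.packages.append(pkg)

# Relationships accept either SPDX objects or raw SPDXID strings.
doc.add_relationship(doc, "DESCRIBES", pkg)

buf = io.BytesIO()
sha1 = doc.to_json(buf, sort_keys=True)

print(doc.dataLicense)                                         # "CC0-1.0" (default)
print(doc.find_by_spdxid("SPDXRef-Package-zlib").versionInfo)  # "1.2.11"

# Attempting to set an undeclared attribute raises KeyError:
# doc.licence = "MIT"   ->  KeyError: "'licence' is not a valid SPDX property"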
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index aeceb100d7..65bb4efe25 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -480,8 +480,10 @@ def OEOuthashBasic(path, sigfile, task, d):
if "package_write_" in task or task == "package_qa":
include_owners = False
include_timestamps = False
+ include_root = True
if task == "package":
include_timestamps = d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1'
+ include_root = False
extra_content = d.getVar('HASHEQUIV_HASH_VERSION')
try:
@@ -592,7 +594,8 @@ def OEOuthashBasic(path, sigfile, task, d):
update_hash("\n")
# Process this directory and all its child files
- process(root)
+ if include_root or root != ".":
+ process(root)
for f in files:
if f == 'fixmepath':
continue
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index 2ac39df9e1..a0c166d884 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -102,6 +102,10 @@ class Rxvt(XTerminal):
command = 'rxvt -T "{title}" -e {command}'
priority = 1
+class URxvt(XTerminal):
+ command = 'urxvt -T "{title}" -e {command}'
+ priority = 1
+
class Screen(Terminal):
command = 'screen -D -m -t "{title}" -S devshell {command}'
diff --git a/meta/lib/oeqa/core/target/ssh.py b/meta/lib/oeqa/core/target/ssh.py
index af4a67f266..832b6216f6 100644
--- a/meta/lib/oeqa/core/target/ssh.py
+++ b/meta/lib/oeqa/core/target/ssh.py
@@ -226,6 +226,9 @@ def SSHCall(command, logger, timeout=None, **opts):
endtime = time.time() + timeout
except InterruptedError:
continue
+ except BlockingIOError:
+ logger.debug('BlockingIOError')
+ continue
# process hasn't returned yet
if not eof:
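
The new except clause above treats EAGAIN from a non-blocking read the same way as EINTR: retry instead of aborting the whole SSH call. Below is a minimal, self-contained sketch of that retry pattern; it is illustrative only, and the helper name, chunk size and timeouts are not taken from ssh.py.

    import os
    import select
    import time

    def drain_fd(fd, timeout=60):
        """Read all data from a non-blocking fd, retrying on EINTR/EAGAIN."""
        endtime = time.time() + timeout
        data = b""
        while time.time() < endtime:
            ready, _, _ = select.select([fd], [], [], 1)
            if not ready:
                continue
            try:
                chunk = os.read(fd, 4096)
            except InterruptedError:    # EINTR: a signal arrived mid-read, retry
                continue
            except BlockingIOError:     # EAGAIN: reported readable but not ready yet, retry
                continue
            if not chunk:               # EOF
                break
            data += chunk
        return data
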
diff --git a/meta/lib/oeqa/runtime/cases/ltp.py b/meta/lib/oeqa/runtime/cases/ltp.py
index a66d5d13d7..879f2a673c 100644
--- a/meta/lib/oeqa/runtime/cases/ltp.py
+++ b/meta/lib/oeqa/runtime/cases/ltp.py
@@ -67,7 +67,7 @@ class LtpTest(LtpTestBase):
def runltp(self, ltp_group):
cmd = '/opt/ltp/runltp -f %s -p -q -r /opt/ltp -l /opt/ltp/results/%s -I 1 -d /opt/ltp' % (ltp_group, ltp_group)
starttime = time.time()
- (status, output) = self.target.run(cmd)
+ (status, output) = self.target.run(cmd, timeout=1200)
endtime = time.time()
with open(os.path.join(self.ltptest_log_dir, "%s-raw.log" % ltp_group), 'w') as f:
diff --git a/meta/lib/oeqa/runtime/cases/rpm.py b/meta/lib/oeqa/runtime/cases/rpm.py
index 7a9d62c003..203fcc8505 100644
--- a/meta/lib/oeqa/runtime/cases/rpm.py
+++ b/meta/lib/oeqa/runtime/cases/rpm.py
@@ -49,21 +49,20 @@ class RpmBasicTest(OERuntimeTestCase):
msg = 'status: %s. Cannot run rpm -qa: %s' % (status, output)
self.assertEqual(status, 0, msg=msg)
- def check_no_process_for_user(u):
- _, output = self.target.run(self.tc.target_cmds['ps'])
- if u + ' ' in output:
- return False
- else:
- return True
+ def wait_for_no_process_for_user(u, timeout = 120):
+ timeout_at = time.time() + timeout
+ while time.time() < timeout_at:
+ _, output = self.target.run(self.tc.target_cmds['ps'])
+ if u + ' ' not in output:
+ return
+ time.sleep(1)
+ user_pss = [ps for ps in output.split("\n") if u + ' ' in ps]
+ msg = "User %s has processes still running: %s" % (u, "\n".join(user_pss))
+ self.fail(msg=msg)
def unset_up_test_user(u):
 # ensure no test1 process is running
- timeout = time.time() + 30
- while time.time() < timeout:
- if check_no_process_for_user(u):
- break
- else:
- time.sleep(1)
+ wait_for_no_process_for_user(u)
status, output = self.target.run('userdel -r %s' % u)
msg = 'Failed to erase user: %s' % output
self.assertTrue(status == 0, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/rtc.py b/meta/lib/oeqa/runtime/cases/rtc.py
index c4e6681324..39f4d29f23 100644
--- a/meta/lib/oeqa/runtime/cases/rtc.py
+++ b/meta/lib/oeqa/runtime/cases/rtc.py
@@ -1,5 +1,6 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfFeature
from oeqa.runtime.decorator.package import OEHasPackage
import re
@@ -16,12 +17,14 @@ class RTCTest(OERuntimeTestCase):
self.logger.debug('Starting systemd-timesyncd daemon')
self.target.run('systemctl enable --now --runtime systemd-timesyncd')
+ @skipIfFeature('read-only-rootfs',
+ 'Test does not work with read-only-rootfs in IMAGE_FEATURES')
@OETestDepends(['ssh.SSHTest.test_ssh'])
@OEHasPackage(['coreutils', 'busybox'])
def test_rtc(self):
(status, output) = self.target.run('hwclock -r')
self.assertEqual(status, 0, msg='Failed to get RTC time, output: %s' % output)
-
+
(status, current_datetime) = self.target.run('date +"%m%d%H%M%Y"')
self.assertEqual(status, 0, msg='Failed to get system current date & time, output: %s' % current_datetime)
@@ -32,7 +35,6 @@ class RTCTest(OERuntimeTestCase):
(status, output) = self.target.run('date %s' % current_datetime)
self.assertEqual(status, 0, msg='Failed to reset system date & time, output: %s' % output)
-
+
(status, output) = self.target.run('hwclock -w')
self.assertEqual(status, 0, msg='Failed to reset RTC time, output: %s' % output)
-
diff --git a/meta/lib/oeqa/runtime/cases/scp.py b/meta/lib/oeqa/runtime/cases/scp.py
index 3a5f292152..f2bbc947d6 100644
--- a/meta/lib/oeqa/runtime/cases/scp.py
+++ b/meta/lib/oeqa/runtime/cases/scp.py
@@ -23,7 +23,7 @@ class ScpTest(OERuntimeTestCase):
os.remove(cls.tmp_path)
@OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['openssh-scp', 'dropbear'])
+ @OEHasPackage(['openssh-scp'])
def test_scp_file(self):
dst = '/tmp/test_scp_file'
diff --git a/meta/lib/oeqa/runtime/context.py b/meta/lib/oeqa/runtime/context.py
index d707ab263a..8a0dbd0736 100644
--- a/meta/lib/oeqa/runtime/context.py
+++ b/meta/lib/oeqa/runtime/context.py
@@ -67,11 +67,11 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
% self.default_target_type)
runtime_group.add_argument('--target-ip', action='store',
default=self.default_target_ip,
- help="IP address of device under test, default: %s" \
+ help="IP address and optionally ssh port (default 22) of device under test, for example '192.168.0.7:22'. Default: %s" \
% self.default_target_ip)
runtime_group.add_argument('--server-ip', action='store',
default=self.default_target_ip,
- help="IP address of device under test, default: %s" \
+ help="IP address of the test host from test target machine, default: %s" \
% self.default_server_ip)
runtime_group.add_argument('--host-dumper-dir', action='store',
diff --git a/meta/lib/oeqa/selftest/cases/bbtests.py b/meta/lib/oeqa/selftest/cases/bbtests.py
index e659be5341..0b88316950 100644
--- a/meta/lib/oeqa/selftest/cases/bbtests.py
+++ b/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -185,6 +185,10 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
self.assertTrue(find, "No version returned for searched recipe. bitbake output: %s" % result.output)
def test_prefile(self):
+ # Test when the prefile does not exist
+ result = runCmd('bitbake -r conf/prefile.conf', ignore_status=True)
+ self.assertEqual(1, result.status, "bitbake didn't error and should have when a specified prefile didn't exist: %s" % result.output)
+ # Test when the prefile exists
preconf = os.path.join(self.builddir, 'conf/prefile.conf')
self.track_for_cleanup(preconf)
ftools.write_file(preconf ,"TEST_PREFILE=\"prefile\"")
@@ -195,6 +199,10 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
self.assertIn('localconf', result.output)
def test_postfile(self):
+ # Test when the postfile does not exist
+ result = runCmd('bitbake -R conf/postfile.conf', ignore_status=True)
+ self.assertEqual(1, result.status, "bitbake didn't error and should have when a specified postfile didn't exist: %s" % result.output)
+ # Test when the postfile exists
postconf = os.path.join(self.builddir, 'conf/postfile.conf')
self.track_for_cleanup(postconf)
ftools.write_file(postconf , "TEST_POSTFILE=\"postfile\"")
diff --git a/meta/lib/oeqa/selftest/cases/cve_check.py b/meta/lib/oeqa/selftest/cases/cve_check.py
index d1947baffc..22ffeffd29 100644
--- a/meta/lib/oeqa/selftest/cases/cve_check.py
+++ b/meta/lib/oeqa/selftest/cases/cve_check.py
@@ -1,9 +1,13 @@
-from oe.cve_check import Version
+import json
+import os
from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_vars
class CVECheck(OESelftestTestCase):
def test_version_compare(self):
+ from oe.cve_check import Version
+
result = Version("100") > Version("99")
self.assertTrue( result, msg="Failed to compare version '100' > '99'")
result = Version("2.3.1") > Version("2.2.3")
@@ -42,3 +46,175 @@ class CVECheck(OESelftestTestCase):
self.assertTrue( result ,msg="Failed to compare version with suffix '1.0p2' > '1.0p1'")
result = Version("1.0_patch2","patch") < Version("1.0_patch3","patch")
self.assertTrue( result ,msg="Failed to compare version with suffix '1.0_patch2' < '1.0_patch3'")
+
+
+ def test_convert_cve_version(self):
+ from oe.cve_check import convert_cve_version
+
+ # Default format
+ self.assertEqual(convert_cve_version("8.3"), "8.3")
+ self.assertEqual(convert_cve_version(""), "")
+
+ # OpenSSL format
+ self.assertEqual(convert_cve_version("1.1.1t"), "1.1.1t")
+
+ # OpenSSH format
+ self.assertEqual(convert_cve_version("8.3_p1"), "8.3p1")
+ self.assertEqual(convert_cve_version("8.3_p22"), "8.3p22")
+
+ # Linux kernel format
+ self.assertEqual(convert_cve_version("6.2_rc8"), "6.2-rc8")
+ self.assertEqual(convert_cve_version("6.2_rc31"), "6.2-rc31")
+
+
+ def test_recipe_report_json(self):
+ config = """
+INHERIT += "cve-check"
+CVE_CHECK_FORMAT_JSON = "1"
+"""
+ self.write_config(config)
+
+ vars = get_bb_vars(["CVE_CHECK_SUMMARY_DIR", "CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ summary_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], vars["CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ recipe_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], "m4-native_cve.json")
+
+ try:
+ os.remove(summary_json)
+ os.remove(recipe_json)
+ except FileNotFoundError:
+ pass
+
+ bitbake("m4-native -c cve_check")
+
+ def check_m4_json(filename):
+ with open(filename) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertEqual(len(report["package"]), 1)
+ package = report["package"][0]
+ self.assertEqual(package["name"], "m4-native")
+ found_cves = { issue["id"]: issue["status"] for issue in package["issue"]}
+ self.assertIn("CVE-2008-1687", found_cves)
+ self.assertEqual(found_cves["CVE-2008-1687"], "Patched")
+
+ self.assertExists(summary_json)
+ check_m4_json(summary_json)
+ self.assertExists(recipe_json)
+ check_m4_json(recipe_json)
+
+
+ def test_image_json(self):
+ config = """
+INHERIT += "cve-check"
+CVE_CHECK_FORMAT_JSON = "1"
+"""
+ self.write_config(config)
+
+ vars = get_bb_vars(["CVE_CHECK_DIR", "CVE_CHECK_SUMMARY_DIR", "CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ report_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], vars["CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ print(report_json)
+ try:
+ os.remove(report_json)
+ except FileNotFoundError:
+ pass
+
+ bitbake("core-image-minimal-initramfs")
+ self.assertExists(report_json)
+
+ # Check that the summary report lists at least one package
+ with open(report_json) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertGreater(len(report["package"]), 1)
+
+ # Check that a random recipe wrote a recipe report to deploy/cve/
+ recipename = report["package"][0]["name"]
+ recipe_report = os.path.join(vars["CVE_CHECK_DIR"], recipename + "_cve.json")
+ self.assertExists(recipe_report)
+ with open(recipe_report) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertEqual(len(report["package"]), 1)
+ self.assertEqual(report["package"][0]["name"], recipename)
+
+
+ def test_recipe_report_json_unpatched(self):
+ config = """
+INHERIT += "cve-check"
+CVE_CHECK_FORMAT_JSON = "1"
+CVE_CHECK_REPORT_PATCHED = "0"
+"""
+ self.write_config(config)
+
+ vars = get_bb_vars(["CVE_CHECK_SUMMARY_DIR", "CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ summary_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], vars["CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ recipe_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], "m4-native_cve.json")
+
+ try:
+ os.remove(summary_json)
+ os.remove(recipe_json)
+ except FileNotFoundError:
+ pass
+
+ bitbake("m4-native -c cve_check")
+
+ def check_m4_json(filename):
+ with open(filename) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertEqual(len(report["package"]), 1)
+ package = report["package"][0]
+ self.assertEqual(package["name"], "m4-native")
+ # m4 had only Patched CVEs, so the "issue" array will be empty
+ self.assertEqual(package["issue"], [])
+
+ self.assertExists(summary_json)
+ check_m4_json(summary_json)
+ self.assertExists(recipe_json)
+ check_m4_json(recipe_json)
+
+
+ def test_recipe_report_json_ignored(self):
+ config = """
+INHERIT += "cve-check"
+CVE_CHECK_FORMAT_JSON = "1"
+CVE_CHECK_REPORT_PATCHED = "1"
+"""
+ self.write_config(config)
+
+ vars = get_bb_vars(["CVE_CHECK_SUMMARY_DIR", "CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ summary_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], vars["CVE_CHECK_SUMMARY_FILE_NAME_JSON"])
+ recipe_json = os.path.join(vars["CVE_CHECK_SUMMARY_DIR"], "logrotate_cve.json")
+
+ try:
+ os.remove(summary_json)
+ os.remove(recipe_json)
+ except FileNotFoundError:
+ pass
+
+ bitbake("logrotate -c cve_check")
+
+ def check_m4_json(filename):
+ with open(filename) as f:
+ report = json.load(f)
+ self.assertEqual(report["version"], "1")
+ self.assertEqual(len(report["package"]), 1)
+ package = report["package"][0]
+ self.assertEqual(package["name"], "logrotate")
+ found_cves = { issue["id"]: issue["status"] for issue in package["issue"]}
+ # m4 CVE should not be in logrotate
+ self.assertNotIn("CVE-2008-1687", found_cves)
+ # logrotate has both Patched and Ignored CVEs
+ self.assertIn("CVE-2011-1098", found_cves)
+ self.assertEqual(found_cves["CVE-2011-1098"], "Patched")
+ self.assertIn("CVE-2011-1548", found_cves)
+ self.assertEqual(found_cves["CVE-2011-1548"], "Ignored")
+ self.assertIn("CVE-2011-1549", found_cves)
+ self.assertEqual(found_cves["CVE-2011-1549"], "Ignored")
+ self.assertIn("CVE-2011-1550", found_cves)
+ self.assertEqual(found_cves["CVE-2011-1550"], "Ignored")
+
+ self.assertExists(summary_json)
+ check_m4_json(summary_json)
+ self.assertExists(recipe_json)
+ check_m4_json(recipe_json)
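
The three tests above pin down the shape of the cve-check JSON reports: a top-level "version" of "1" and a "package" list whose entries carry a "name" and an "issue" list of {"id", "status"} records. A hedged sketch of a consumer of that format follows; any field beyond the ones the assertions touch is an assumption, not something guaranteed by cve-check.bbclass.

    import json

    def summarize_cve_report(path):
        """Print per-package CVE statuses from a cve-check JSON report.

        Only relies on the fields exercised by the selftests above
        (version, package[].name, package[].issue[].id/status).
        """
        with open(path) as f:
            report = json.load(f)
        assert report["version"] == "1"
        for package in report["package"]:
            statuses = {issue["id"]: issue["status"] for issue in package["issue"]}
            print(package["name"], statuses)
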
diff --git a/meta/lib/oeqa/selftest/cases/devtool.py b/meta/lib/oeqa/selftest/cases/devtool.py
index 87e71632ab..9efe342a0d 100644
--- a/meta/lib/oeqa/selftest/cases/devtool.py
+++ b/meta/lib/oeqa/selftest/cases/devtool.py
@@ -8,6 +8,7 @@ import shutil
import tempfile
import glob
import fnmatch
+import unittest
import oeqa.utils.ftools as ftools
from oeqa.selftest.case import OESelftestTestCase
@@ -38,6 +39,13 @@ def setUpModule():
canonical_layerpath = os.path.realpath(canonical_layerpath) + '/'
edited_layers.append(layerpath)
oldmetapath = os.path.realpath(layerpath)
+
+ # when downloading poky from tar.gz some tests will be skipped (BUG 12389)
+ try:
+ runCmd('git rev-parse --is-inside-work-tree', cwd=canonical_layerpath)
+ except:
+ raise unittest.SkipTest("devtool tests require folder to be a git repo")
+
result = runCmd('git rev-parse --show-toplevel', cwd=canonical_layerpath)
oldreporoot = result.output.rstrip()
newmetapath = os.path.join(corecopydir, os.path.relpath(oldmetapath, oldreporoot))
@@ -1323,7 +1331,7 @@ class DevtoolExtractTests(DevtoolBase):
# Now really test deploy-target
result = runCmd('devtool deploy-target -c %s root@%s' % (testrecipe, qemu.ip))
# Run a test command to see if it was installed properly
- sshargs = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ sshargs = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o HostKeyAlgorithms=+ssh-rsa'
result = runCmd('ssh %s root@%s %s' % (sshargs, qemu.ip, testcommand))
# Check if it deployed all of the files with the right ownership/perms
# First look on the host - need to do this under pseudo to get the correct ownership/perms
diff --git a/meta/lib/oeqa/selftest/cases/glibc.py b/meta/lib/oeqa/selftest/cases/glibc.py
index cf8c92887b..c1f6e4c1fb 100644
--- a/meta/lib/oeqa/selftest/cases/glibc.py
+++ b/meta/lib/oeqa/selftest/cases/glibc.py
@@ -41,7 +41,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
with contextlib.ExitStack() as s:
# use the base work dir, as the nfs mount, since the recipe directory may not exist
tmpdir = get_bb_var("BASE_WORKDIR")
- nfsport, mountport = s.enter_context(unfs_server(tmpdir))
+ nfsport, mountport = s.enter_context(unfs_server(tmpdir, udp = False))
# build core-image-minimal with required packages
default_installed_packages = [
@@ -61,7 +61,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
bitbake("core-image-minimal")
# start runqemu
- qemu = s.enter_context(runqemu("core-image-minimal", runqemuparams = "nographic"))
+ qemu = s.enter_context(runqemu("core-image-minimal", runqemuparams = "nographic", qemuparams = "-m 1024"))
# validate that SSH is working
status, _ = qemu.run("uname")
@@ -70,7 +70,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
# setup nfs mount
if qemu.run("mkdir -p \"{0}\"".format(tmpdir))[0] != 0:
raise Exception("Failed to setup NFS mount directory on target")
- mountcmd = "mount -o noac,nfsvers=3,port={0},udp,mountport={1} \"{2}:{3}\" \"{3}\"".format(nfsport, mountport, qemu.server_ip, tmpdir)
+ mountcmd = "mount -o noac,nfsvers=3,port={0},mountport={1} \"{2}:{3}\" \"{3}\"".format(nfsport, mountport, qemu.server_ip, tmpdir)
status, output = qemu.run(mountcmd)
if status != 0:
raise Exception("Failed to setup NFS mount on target ({})".format(repr(output)))
diff --git a/meta/lib/oeqa/selftest/cases/oescripts.py b/meta/lib/oeqa/selftest/cases/oescripts.py
index 726daff7c6..fb99be447e 100644
--- a/meta/lib/oeqa/selftest/cases/oescripts.py
+++ b/meta/lib/oeqa/selftest/cases/oescripts.py
@@ -133,7 +133,8 @@ class OEListPackageconfigTests(OEScriptTests):
def check_endlines(self, results, expected_endlines):
for line in results.output.splitlines():
for el in expected_endlines:
- if line.split() == el.split():
+ if line and line.split()[0] == el.split()[0] and \
+ ' '.join(sorted(el.split())) in ' '.join(sorted(line.split())):
expected_endlines.remove(el)
break
diff --git a/meta/lib/oeqa/selftest/cases/prservice.py b/meta/lib/oeqa/selftest/cases/prservice.py
index 578b2b4dd9..fdc1e40058 100644
--- a/meta/lib/oeqa/selftest/cases/prservice.py
+++ b/meta/lib/oeqa/selftest/cases/prservice.py
@@ -75,7 +75,7 @@ class BitbakePrTests(OESelftestTestCase):
exported_db_path = os.path.join(self.builddir, 'export.inc')
export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)
- self.assertTrue(os.path.exists(exported_db_path))
+ self.assertTrue(os.path.exists(exported_db_path), msg="%s didn't exist, tool output %s" % (exported_db_path, export_result.output))
if replace_current_db:
current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
diff --git a/meta/lib/oeqa/selftest/cases/reproducible.py b/meta/lib/oeqa/selftest/cases/reproducible.py
index 4b606e7e64..be4cdcc429 100644
--- a/meta/lib/oeqa/selftest/cases/reproducible.py
+++ b/meta/lib/oeqa/selftest/cases/reproducible.py
@@ -39,7 +39,6 @@ exclude_packages = [
'gstreamer1.0-python',
'hwlatdetect',
'kernel-devsrc',
- 'libaprutil',
'libcap-ng',
'libjson',
'libproxy',
@@ -189,7 +188,7 @@ class ReproducibleTests(OESelftestTestCase):
def setUpLocal(self):
super().setUpLocal()
- needed_vars = ['TOPDIR', 'TARGET_PREFIX', 'BB_NUMBER_THREADS']
+ needed_vars = ['TOPDIR', 'TARGET_PREFIX', 'BB_NUMBER_THREADS', 'BB_HASHSERVE']
bb_vars = get_bb_vars(needed_vars)
for v in needed_vars:
setattr(self, v.lower(), bb_vars[v])
@@ -260,7 +259,7 @@ class ReproducibleTests(OESelftestTestCase):
# mirror, forcing a complete build from scratch
config += textwrap.dedent('''\
SSTATE_DIR = "${TMPDIR}/sstate"
- SSTATE_MIRRORS = ""
+ SSTATE_MIRRORS = "file://.*/.*-native.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH file://.*/.*-cross.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
''')
self.logger.info("Building %s (sstate%s allowed)..." % (name, '' if use_sstate else ' NOT'))
diff --git a/meta/lib/oeqa/selftest/cases/runtime_test.py b/meta/lib/oeqa/selftest/cases/runtime_test.py
index 20dc1c9482..cc4190c1d6 100644
--- a/meta/lib/oeqa/selftest/cases/runtime_test.py
+++ b/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -175,18 +175,24 @@ class TestImage(OESelftestTestCase):
if "DISPLAY" not in os.environ:
self.skipTest("virgl gtk test must be run inside a X session")
distro = oe.lsb.distro_identifier()
+ if distro and distro.startswith('almalinux'):
+ self.skipTest('virgl isn\'t working with Alma Linux')
+ if distro and distro.startswith('rocky'):
+ self.skipTest('virgl isn\'t working with Rocky Linux')
if distro and distro == 'debian-8':
self.skipTest('virgl isn\'t working with Debian 8')
if distro and distro == 'centos-7':
self.skipTest('virgl isn\'t working with Centos 7')
if distro and distro == 'centos-8':
self.skipTest('virgl isn\'t working with Centos 8')
- if distro and distro == 'fedora-34':
- self.skipTest('virgl isn\'t working with Fedora 34')
- if distro and distro == 'fedora-35':
- self.skipTest('virgl isn\'t working with Fedora 35')
+ if distro and distro.startswith('fedora'):
+ self.skipTest('virgl isn\'t working with Fedora')
if distro and distro == 'opensuseleap-15.0':
self.skipTest('virgl isn\'t working with Opensuse 15.0')
+ if distro and distro == 'ubuntu-22.04':
+ self.skipTest('virgl isn\'t working with Ubuntu 22.04')
+ if distro and distro == 'ubuntu-22.10':
+ self.skipTest('virgl isn\'t working with Ubuntu 22.10')
qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
sdl_packageconfig = get_bb_var('PACKAGECONFIG', 'libsdl2-native')
@@ -230,7 +236,7 @@ class TestImage(OESelftestTestCase):
except FileNotFoundError:
self.skipTest("/dev/dri directory does not exist; no render nodes available on this machine.")
try:
- dripath = subprocess.check_output("pkg-config --variable=dridriverdir dri", shell=True)
+ dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
except subprocess.CalledProcessError as e:
self.skipTest("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
qemu_packageconfig = get_bb_var('PACKAGECONFIG', 'qemu-system-native')
diff --git a/meta/lib/oeqa/selftest/cases/tinfoil.py b/meta/lib/oeqa/selftest/cases/tinfoil.py
index 686ce7e6b9..6668d7cdc8 100644
--- a/meta/lib/oeqa/selftest/cases/tinfoil.py
+++ b/meta/lib/oeqa/selftest/cases/tinfoil.py
@@ -65,6 +65,20 @@ class TinfoilTests(OESelftestTestCase):
localdata.setVar('PN', 'hello')
self.assertEqual('hello', localdata.getVar('BPN'))
+ # The config_data API to parse_recipe_file is used by:
+ # layerindex-web layerindex/update_layer.py
+ def test_parse_recipe_custom_data(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ localdata = bb.data.createCopy(tinfoil.config_data)
+ localdata.setVar("TESTVAR", "testval")
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ rd = tinfoil.parse_recipe_file(best[3], config_data=localdata)
+ self.assertEqual("testval", rd.getVar('TESTVAR'))
+
def test_list_recipes(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=False, quiet=2)
diff --git a/meta/lib/oeqa/utils/metadata.py b/meta/lib/oeqa/utils/metadata.py
index 8013aa684d..15ec190c4a 100644
--- a/meta/lib/oeqa/utils/metadata.py
+++ b/meta/lib/oeqa/utils/metadata.py
@@ -27,9 +27,9 @@ def metadata_from_bb():
data_dict = get_bb_vars()
# Distro information
- info_dict['distro'] = {'id': data_dict['DISTRO'],
- 'version_id': data_dict['DISTRO_VERSION'],
- 'pretty_name': '%s %s' % (data_dict['DISTRO'], data_dict['DISTRO_VERSION'])}
+ info_dict['distro'] = {'id': data_dict.get('DISTRO', 'NODISTRO'),
+ 'version_id': data_dict.get('DISTRO_VERSION', 'NO_DISTRO_VERSION'),
+ 'pretty_name': '%s %s' % (data_dict.get('DISTRO', 'NODISTRO'), data_dict.get('DISTRO_VERSION', 'NO_DISTRO_VERSION'))}
# Host distro information
os_release = get_os_release()
diff --git a/meta/lib/oeqa/utils/nfs.py b/meta/lib/oeqa/utils/nfs.py
index a37686c914..c9bac050a4 100644
--- a/meta/lib/oeqa/utils/nfs.py
+++ b/meta/lib/oeqa/utils/nfs.py
@@ -8,7 +8,7 @@ from oeqa.utils.commands import bitbake, get_bb_var, Command
from oeqa.utils.network import get_free_port
@contextlib.contextmanager
-def unfs_server(directory, logger = None):
+def unfs_server(directory, logger = None, udp = True):
unfs_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "unfs3-native")
if not os.path.exists(os.path.join(unfs_sysroot, "usr", "bin", "unfsd")):
# build native tool
@@ -22,7 +22,7 @@ def unfs_server(directory, logger = None):
exports.write("{0} (rw,no_root_squash,no_all_squash,insecure)\n".format(directory).encode())
# find some ports for the server
- nfsport, mountport = get_free_port(udp = True), get_free_port(udp = True)
+ nfsport, mountport = get_free_port(udp), get_free_port(udp)
nenv = dict(os.environ)
nenv['PATH'] = "{0}/sbin:{0}/usr/sbin:{0}/usr/bin:".format(unfs_sysroot) + nenv.get('PATH', '')
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py
index de0dff3ff0..c84d299a80 100644
--- a/meta/lib/oeqa/utils/qemurunner.py
+++ b/meta/lib/oeqa/utils/qemurunner.py
@@ -432,10 +432,13 @@ class QemuRunner:
except OSError as e:
if e.errno != errno.ESRCH:
raise
- endtime = time.time() + self.runqemutime
- while self.runqemu.poll() is None and time.time() < endtime:
- time.sleep(1)
- if self.runqemu.poll() is None:
+ try:
+ outs, errs = self.runqemu.communicate(timeout = self.runqemutime)
+ if outs:
+ self.logger.info("Output from runqemu:\n%s", outs.decode("utf-8"))
+ if errs:
+ self.logger.info("Stderr from runqemu:\n%s", errs.decode("utf-8"))
+ except TimeoutExpired:
self.logger.debug("Sending SIGKILL to runqemu")
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
if not self.runqemu.stdout.closed:
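
The replacement above swaps a manual poll loop for Popen.communicate(timeout=...), which also collects any remaining output, and falls back to SIGKILL on the whole process group when the timeout expires. The same pattern in isolation (the command is a stand-in, not runqemu):

    import os
    import signal
    import subprocess

    proc = subprocess.Popen(["sleep", "300"], preexec_fn=os.setsid,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.terminate()                                  # ask the child to exit
    try:
        outs, errs = proc.communicate(timeout=30)     # wait and drain output
    except subprocess.TimeoutExpired:
        os.killpg(os.getpgid(proc.pid), signal.SIGKILL)   # force the whole group down
        outs, errs = proc.communicate()
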
diff --git a/meta/recipes-bsp/efivar/efivar_37.bb b/meta/recipes-bsp/efivar/efivar_37.bb
index fa1fe1ecdf..858c61ae6a 100644
--- a/meta/recipes-bsp/efivar/efivar_37.bb
+++ b/meta/recipes-bsp/efivar/efivar_37.bb
@@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=6626bb1e20189cfa95f2c508ba286393"
COMPATIBLE_HOST = "(i.86|x86_64|arm|aarch64).*-linux"
-SRC_URI = "git://github.com/rhinstaller/efivar.git;branch=master;protocol=https \
+SRC_URI = "git://github.com/rhinstaller/efivar.git;branch=main;protocol=https \
file://determinism.patch \
file://no-werror.patch"
SRCREV = "c1d6b10e1ed4ba2be07f385eae5bceb694478a10"
diff --git a/meta/recipes-bsp/grub/files/CVE-2020-27749.patch b/meta/recipes-bsp/grub/files/CVE-2020-27749.patch
new file mode 100644
index 0000000000..a2566b2ded
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2020-27749.patch
@@ -0,0 +1,609 @@
+From 4ea7bae51f97e49c84dc67ea30b466ca8633b9f6 Mon Sep 17 00:00:00 2001
+From: Chris Coulson <chris.coulson@canonical.com>
+Date: Thu, 7 Jan 2021 19:21:03 +0000
+Subject: kern/parser: Fix a stack buffer overflow
+
+grub_parser_split_cmdline() expands variable names present in the supplied
+command line into their corresponding variable contents and uses a 1 kiB
+stack buffer for temporary storage without sufficient bounds checking. If
+the function is called with a command line that references a variable with
+a sufficiently large payload, it is possible to overflow the stack
+buffer via tab completion, corrupt the stack frame and potentially
+control execution.
+
+Fixes: CVE-2020-27749
+
+Reported-by: Chris Coulson <chris.coulson@canonical.com>
+Signed-off-by: Chris Coulson <chris.coulson@canonical.com>
+Signed-off-by: Darren Kenny <darren.kenny@oracle.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=c6c426e5ab6ea715153b72584de6bd8c82f698ec && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=b1c9e9e889e4273fb15712051c887e6078511448 && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=3d157bbd06506b170fde5ec23980c4bf9f7660e2 && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=8bc817014ce3d7a498db44eae33c8b90e2430926 && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=030fb6c4fa354cdbd6a8d6903dfed5d36eaf3cb2 && https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=4ea7bae51f97e49c84dc67ea30b466ca8633b9f6]
+CVE: CVE-2020-27749
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/Makefile.core.def | 1 +
+ grub-core/kern/buffer.c | 117 +++++++++++++++++++++
+ grub-core/kern/parser.c | 204 +++++++++++++++++++++++-------------
+ include/grub/buffer.h | 144 +++++++++++++++++++++++++
+ 4 files changed, 395 insertions(+), 71 deletions(-)
+ create mode 100644 grub-core/kern/buffer.c
+ create mode 100644 include/grub/buffer.h
+
+diff --git a/grub-core/Makefile.core.def b/grub-core/Makefile.core.def
+index 651ea2a..823cd57 100644
+--- a/grub-core/Makefile.core.def
++++ b/grub-core/Makefile.core.def
+@@ -123,6 +123,7 @@ kernel = {
+ riscv32_efi_startup = kern/riscv/efi/startup.S;
+ riscv64_efi_startup = kern/riscv/efi/startup.S;
+
++ common = kern/buffer.c;
+ common = kern/command.c;
+ common = kern/corecmd.c;
+ common = kern/device.c;
+diff --git a/grub-core/kern/buffer.c b/grub-core/kern/buffer.c
+new file mode 100644
+index 0000000..9f5f8b8
+--- /dev/null
++++ b/grub-core/kern/buffer.c
+@@ -0,0 +1,117 @@
++/*
++ * GRUB -- GRand Unified Bootloader
++ * Copyright (C) 2021 Free Software Foundation, Inc.
++ *
++ * GRUB is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GRUB is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <grub/buffer.h>
++#include <grub/err.h>
++#include <grub/misc.h>
++#include <grub/mm.h>
++#include <grub/safemath.h>
++#include <grub/types.h>
++
++grub_buffer_t
++grub_buffer_new (grub_size_t sz)
++{
++ struct grub_buffer *ret;
++
++ ret = (struct grub_buffer *) grub_malloc (sizeof (*ret));
++ if (ret == NULL)
++ return NULL;
++
++ ret->data = (grub_uint8_t *) grub_malloc (sz);
++ if (ret->data == NULL)
++ {
++ grub_free (ret);
++ return NULL;
++ }
++
++ ret->sz = sz;
++ ret->pos = 0;
++ ret->used = 0;
++
++ return ret;
++}
++
++void
++grub_buffer_free (grub_buffer_t buf)
++{
++ grub_free (buf->data);
++ grub_free (buf);
++}
++
++grub_err_t
++grub_buffer_ensure_space (grub_buffer_t buf, grub_size_t req)
++{
++ grub_uint8_t *d;
++ grub_size_t newsz = 1;
++
++ /* Is the current buffer size adequate? */
++ if (buf->sz >= req)
++ return GRUB_ERR_NONE;
++
++ /* Find the smallest power-of-2 size that satisfies the request. */
++ while (newsz < req)
++ {
++ if (newsz == 0)
++ return grub_error (GRUB_ERR_OUT_OF_RANGE,
++ N_("requested buffer size is too large"));
++ newsz <<= 1;
++ }
++
++ d = (grub_uint8_t *) grub_realloc (buf->data, newsz);
++ if (d == NULL)
++ return grub_errno;
++
++ buf->data = d;
++ buf->sz = newsz;
++
++ return GRUB_ERR_NONE;
++}
++
++void *
++grub_buffer_take_data (grub_buffer_t buf)
++{
++ void *data = buf->data;
++
++ buf->data = NULL;
++ buf->sz = buf->pos = buf->used = 0;
++
++ return data;
++}
++
++void
++grub_buffer_reset (grub_buffer_t buf)
++{
++ buf->pos = buf->used = 0;
++}
++
++grub_err_t
++grub_buffer_advance_read_pos (grub_buffer_t buf, grub_size_t n)
++{
++ grub_size_t newpos;
++
++ if (grub_add (buf->pos, n, &newpos))
++ return grub_error (GRUB_ERR_OUT_OF_RANGE, N_("overflow is detected"));
++
++ if (newpos > buf->used)
++ return grub_error (GRUB_ERR_OUT_OF_RANGE,
++ N_("new read is position beyond the end of the written data"));
++
++ buf->pos = newpos;
++
++ return GRUB_ERR_NONE;
++}
+diff --git a/grub-core/kern/parser.c b/grub-core/kern/parser.c
+index d1cf061..6ab7aa4 100644
+--- a/grub-core/kern/parser.c
++++ b/grub-core/kern/parser.c
+@@ -1,7 +1,7 @@
+ /* parser.c - the part of the parser that can return partial tokens */
+ /*
+ * GRUB -- GRand Unified Bootloader
+- * Copyright (C) 2005,2007,2009 Free Software Foundation, Inc.
++ * Copyright (C) 2005,2007,2009,2021 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -18,6 +18,7 @@
+ */
+
+ #include <grub/parser.h>
++#include <grub/buffer.h>
+ #include <grub/env.h>
+ #include <grub/misc.h>
+ #include <grub/mm.h>
+@@ -107,8 +108,8 @@ check_varstate (grub_parser_state_t s)
+ }
+
+
+-static void
+-add_var (char *varname, char **bp, char **vp,
++static grub_err_t
++add_var (grub_buffer_t varname, grub_buffer_t buf,
+ grub_parser_state_t state, grub_parser_state_t newstate)
+ {
+ const char *val;
+@@ -116,17 +117,74 @@ add_var (char *varname, char **bp, char **vp,
+ /* Check if a variable was being read in and the end of the name
+ was reached. */
+ if (!(check_varstate (state) && !check_varstate (newstate)))
+- return;
++ return GRUB_ERR_NONE;
++
++ if (grub_buffer_append_char (varname, '\0') != GRUB_ERR_NONE)
++ return grub_errno;
+
+- *((*vp)++) = '\0';
+- val = grub_env_get (varname);
+- *vp = varname;
++ val = grub_env_get ((const char *) grub_buffer_peek_data (varname));
++ grub_buffer_reset (varname);
+ if (!val)
+- return;
++ return GRUB_ERR_NONE;
+
+ /* Insert the contents of the variable in the buffer. */
+- for (; *val; val++)
+- *((*bp)++) = *val;
++ return grub_buffer_append_data (buf, val, grub_strlen (val));
++}
++
++static grub_err_t
++terminate_arg (grub_buffer_t buffer, int *argc)
++{
++ grub_size_t unread = grub_buffer_get_unread_bytes (buffer);
++
++ if (unread == 0)
++ return GRUB_ERR_NONE;
++
++ if (*(const char *) grub_buffer_peek_data_at (buffer, unread - 1) == '\0')
++ return GRUB_ERR_NONE;
++
++ if (grub_buffer_append_char (buffer, '\0') != GRUB_ERR_NONE)
++ return grub_errno;
++
++ (*argc)++;
++
++ return GRUB_ERR_NONE;
++}
++
++static grub_err_t
++process_char (char c, grub_buffer_t buffer, grub_buffer_t varname,
++ grub_parser_state_t state, int *argc,
++ grub_parser_state_t *newstate)
++{
++ char use;
++
++ *newstate = grub_parser_cmdline_state (state, c, &use);
++
++ /*
++ * If a variable was being processed and this character does
++ * not describe the variable anymore, write the variable to
++ * the buffer.
++ */
++ if (add_var (varname, buffer, state, *newstate) != GRUB_ERR_NONE)
++ return grub_errno;
++
++ if (check_varstate (*newstate))
++ {
++ if (use)
++ return grub_buffer_append_char (varname, use);
++ }
++ else if (*newstate == GRUB_PARSER_STATE_TEXT &&
++ state != GRUB_PARSER_STATE_ESC && grub_isspace (use))
++ {
++ /*
++ * Don't add more than one argument if multiple
++ * spaces are used.
++ */
++ return terminate_arg (buffer, argc);
++ }
++ else if (use)
++ return grub_buffer_append_char (buffer, use);
++
++ return GRUB_ERR_NONE;
+ }
+
+ grub_err_t
+@@ -135,24 +193,36 @@ grub_parser_split_cmdline (const char *cmdline,
+ int *argc, char ***argv)
+ {
+ grub_parser_state_t state = GRUB_PARSER_STATE_TEXT;
+- /* XXX: Fixed size buffer, perhaps this buffer should be dynamically
+- allocated. */
+- char buffer[1024];
+- char *bp = buffer;
++ grub_buffer_t buffer, varname;
+ char *rd = (char *) cmdline;
+- char varname[200];
+- char *vp = varname;
+- char *args;
++ char *rp = rd;
+ int i;
+
+ *argc = 0;
+ *argv = NULL;
++
++ buffer = grub_buffer_new (1024);
++ if (buffer == NULL)
++ return grub_errno;
++
++ varname = grub_buffer_new (200);
++ if (varname == NULL)
++ goto fail;
++
+ do
+ {
+- if (!rd || !*rd)
++ if (rp == NULL || *rp == '\0')
+ {
++ if (rd != cmdline)
++ {
++ grub_free (rd);
++ rd = rp = NULL;
++ }
+ if (getline)
+- getline (&rd, 1, getline_data);
++ {
++ getline (&rd, 1, getline_data);
++ rp = rd;
++ }
+ else
+ break;
+ }
+@@ -160,39 +230,14 @@ grub_parser_split_cmdline (const char *cmdline,
+ if (!rd)
+ break;
+
+- for (; *rd; rd++)
++ for (; *rp != '\0'; rp++)
+ {
+ grub_parser_state_t newstate;
+- char use;
+
+- newstate = grub_parser_cmdline_state (state, *rd, &use);
++ if (process_char (*rp, buffer, varname, state, argc,
++ &newstate) != GRUB_ERR_NONE)
++ goto fail;
+
+- /* If a variable was being processed and this character does
+- not describe the variable anymore, write the variable to
+- the buffer. */
+- add_var (varname, &bp, &vp, state, newstate);
+-
+- if (check_varstate (newstate))
+- {
+- if (use)
+- *(vp++) = use;
+- }
+- else
+- {
+- if (newstate == GRUB_PARSER_STATE_TEXT
+- && state != GRUB_PARSER_STATE_ESC && grub_isspace (use))
+- {
+- /* Don't add more than one argument if multiple
+- spaces are used. */
+- if (bp != buffer && *(bp - 1))
+- {
+- *(bp++) = '\0';
+- (*argc)++;
+- }
+- }
+- else if (use)
+- *(bp++) = use;
+- }
+ state = newstate;
+ }
+ }
+@@ -200,43 +245,60 @@ grub_parser_split_cmdline (const char *cmdline,
+
+ /* A special case for when the last character was part of a
+ variable. */
+- add_var (varname, &bp, &vp, state, GRUB_PARSER_STATE_TEXT);
++ if (add_var (varname, buffer, state, GRUB_PARSER_STATE_TEXT) != GRUB_ERR_NONE)
++ goto fail;
+
+- if (bp != buffer && *(bp - 1))
+- {
+- *(bp++) = '\0';
+- (*argc)++;
+- }
++ /* Ensure that the last argument is terminated. */
++ if (terminate_arg (buffer, argc) != GRUB_ERR_NONE)
++ goto fail;
+
+ /* If there are no args, then we're done. */
+ if (!*argc)
+- return 0;
+-
+- /* Reserve memory for the return values. */
+- args = grub_malloc (bp - buffer);
+- if (!args)
+- return grub_errno;
+- grub_memcpy (args, buffer, bp - buffer);
++ {
++ grub_errno = GRUB_ERR_NONE;
++ goto out;
++ }
+
+ *argv = grub_calloc (*argc + 1, sizeof (char *));
+ if (!*argv)
+- {
+- grub_free (args);
+- return grub_errno;
+- }
++ goto fail;
+
+ /* The arguments are separated with 0's, setup argv so it points to
+ the right values. */
+- bp = args;
+ for (i = 0; i < *argc; i++)
+ {
+- (*argv)[i] = bp;
+- while (*bp)
+- bp++;
+- bp++;
++ char *arg;
++
++ if (i > 0)
++ {
++ if (grub_buffer_advance_read_pos (buffer, 1) != GRUB_ERR_NONE)
++ goto fail;
++ }
++
++ arg = (char *) grub_buffer_peek_data (buffer);
++ if (arg == NULL ||
++ grub_buffer_advance_read_pos (buffer, grub_strlen (arg)) != GRUB_ERR_NONE)
++ goto fail;
++
++ (*argv)[i] = arg;
+ }
+
+- return 0;
++ /* Keep memory for the return values. */
++ grub_buffer_take_data (buffer);
++
++ grub_errno = GRUB_ERR_NONE;
++
++ out:
++ if (rd != cmdline)
++ grub_free (rd);
++ grub_buffer_free (buffer);
++ grub_buffer_free (varname);
++
++ return grub_errno;
++
++ fail:
++ grub_free (*argv);
++ goto out;
+ }
+
+ /* Helper for grub_parser_execute. */
+diff --git a/include/grub/buffer.h b/include/grub/buffer.h
+new file mode 100644
+index 0000000..f4b10cf
+--- /dev/null
++++ b/include/grub/buffer.h
+@@ -0,0 +1,144 @@
++/*
++ * GRUB -- GRand Unified Bootloader
++ * Copyright (C) 2021 Free Software Foundation, Inc.
++ *
++ * GRUB is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GRUB is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef GRUB_BUFFER_H
++#define GRUB_BUFFER_H 1
++
++#include <grub/err.h>
++#include <grub/misc.h>
++#include <grub/mm.h>
++#include <grub/safemath.h>
++#include <grub/types.h>
++
++struct grub_buffer
++{
++ grub_uint8_t *data;
++ grub_size_t sz;
++ grub_size_t pos;
++ grub_size_t used;
++};
++
++/*
++ * grub_buffer_t represents a simple variable sized byte buffer with
++ * read and write cursors. It currently only implements
++ * functionality required by the only user in GRUB (append byte[s],
++ * peeking data at a specified position and updating the read cursor.
++ * Some things that this doesn't do yet are:
++ * - Reading a portion of the buffer by copying data from the current
++ * read position in to a caller supplied destination buffer and then
++ * automatically updating the read cursor.
++ * - Dropping the read part at the start of the buffer when an append
++ * requires more space.
++ */
++typedef struct grub_buffer *grub_buffer_t;
++
++/* Allocate a new buffer with the specified initial size. */
++extern grub_buffer_t grub_buffer_new (grub_size_t sz);
++
++/* Free the buffer and its resources. */
++extern void grub_buffer_free (grub_buffer_t buf);
++
++/* Return the number of unread bytes in this buffer. */
++static inline grub_size_t
++grub_buffer_get_unread_bytes (grub_buffer_t buf)
++{
++ return buf->used - buf->pos;
++}
++
++/*
++ * Ensure that the buffer size is at least the requested
++ * number of bytes.
++ */
++extern grub_err_t grub_buffer_ensure_space (grub_buffer_t buf, grub_size_t req);
++
++/*
++ * Append the specified number of bytes from the supplied
++ * data to the buffer.
++ */
++static inline grub_err_t
++grub_buffer_append_data (grub_buffer_t buf, const void *data, grub_size_t len)
++{
++ grub_size_t req;
++
++ if (grub_add (buf->used, len, &req))
++ return grub_error (GRUB_ERR_OUT_OF_RANGE, N_("overflow is detected"));
++
++ if (grub_buffer_ensure_space (buf, req) != GRUB_ERR_NONE)
++ return grub_errno;
++
++ grub_memcpy (&buf->data[buf->used], data, len);
++ buf->used = req;
++
++ return GRUB_ERR_NONE;
++}
++
++/* Append the supplied character to the buffer. */
++static inline grub_err_t
++grub_buffer_append_char (grub_buffer_t buf, char c)
++{
++ return grub_buffer_append_data (buf, &c, 1);
++}
++
++/*
++ * Forget and return the underlying data buffer. The caller
++ * becomes the owner of this buffer, and must free it when it
++ * is no longer required.
++ */
++extern void *grub_buffer_take_data (grub_buffer_t buf);
++
++/* Reset this buffer. Note that this does not deallocate any resources. */
++void grub_buffer_reset (grub_buffer_t buf);
++
++/*
++ * Return a pointer to the underlying data buffer at the specified
++ * offset from the current read position. Note that this pointer may
++ * become invalid if the buffer is mutated further.
++ */
++static inline void *
++grub_buffer_peek_data_at (grub_buffer_t buf, grub_size_t off)
++{
++ if (grub_add (buf->pos, off, &off))
++ {
++ grub_error (GRUB_ERR_OUT_OF_RANGE, N_("overflow is detected."));
++ return NULL;
++ }
++
++ if (off >= buf->used)
++ {
++ grub_error (GRUB_ERR_OUT_OF_RANGE, N_("peek out of range"));
++ return NULL;
++ }
++
++ return &buf->data[off];
++}
++
++/*
++ * Return a pointer to the underlying data buffer at the current
++ * read position. Note that this pointer may become invalid if the
++ * buffer is mutated further.
++ */
++static inline void *
++grub_buffer_peek_data (grub_buffer_t buf)
++{
++ return grub_buffer_peek_data_at (buf, 0);
++}
++
++/* Advance the read position by the specified number of bytes. */
++extern grub_err_t grub_buffer_advance_read_pos (grub_buffer_t buf, grub_size_t n);
++
++#endif /* GRUB_BUFFER_H */
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-20225.patch b/meta/recipes-bsp/grub/files/CVE-2021-20225.patch
new file mode 100644
index 0000000000..b864febe62
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-20225.patch
@@ -0,0 +1,58 @@
+From 2a330dba93ff11bc00eda76e9419bc52b0c7ead6 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 22 Jan 2021 16:07:29 +1100
+Subject: lib/arg: Block repeated short options that require an argument
+
+Fuzzing found the following crash:
+
+ search -hhhhhhhhhhhhhf
+
+We didn't allocate enough option space for 13 hints because the
+allocation code counts the number of discrete arguments (i.e. argc).
+However, the shortopt parsing code will happily keep processing
+a combination of short options without checking if those short
+options require an argument. This means you can easily end up writing
+past the allocated option space.
+
+This fixes a OOB write which can cause heap corruption.
+
+Fixes: CVE-2021-20225
+
+Reported-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=2a330dba93ff11bc00eda76e9419bc52b0c7ead6]
+CVE: CVE-2021-20225
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/lib/arg.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/grub-core/lib/arg.c b/grub-core/lib/arg.c
+index 3288609..537c5e9 100644
+--- a/grub-core/lib/arg.c
++++ b/grub-core/lib/arg.c
+@@ -299,6 +299,19 @@ grub_arg_parse (grub_extcmd_t cmd, int argc, char **argv,
+ it can have an argument value. */
+ if (*curshort)
+ {
++ /*
++ * Only permit further short opts if this one doesn't
++ * require a value.
++ */
++ if (opt->type != ARG_TYPE_NONE &&
++ !(opt->flags & GRUB_ARG_OPTION_OPTIONAL))
++ {
++ grub_error (GRUB_ERR_BAD_ARGUMENT,
++ N_("missing mandatory option for `%s'"),
++ opt->longarg);
++ goto fail;
++ }
++
+ if (parse_option (cmd, opt, 0, usr) || grub_errno)
+ goto fail;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-20233.patch b/meta/recipes-bsp/grub/files/CVE-2021-20233.patch
new file mode 100644
index 0000000000..d2069afc18
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-20233.patch
@@ -0,0 +1,50 @@
+From 2f533a89a8dfcacbf2c9dbc77d910f111f24bf33 Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 22 Jan 2021 17:10:48 +1100
+Subject: commands/menuentry: Fix quoting in setparams_prefix()
+
+Commit 9acdcbf32542 (use single quotes in menuentry setparams command)
+says that expressing a quoted single quote will require 3 characters. It
+actually requires (and always did require!) 4 characters:
+
+ str: a'b => a'\''b
+ len: 3 => 6 (2 for the letters + 4 for the quote)
+
+This leads to not allocating enough memory and thus out of bounds writes
+that have been observed to cause heap corruption.
+
+Allocate 4 bytes for each single quote.
+
+Commit 22e7dbb2bb81 (Fix quoting in legacy parser.) does the same
+quoting, but it adds 3 as extra overhead on top of the single byte that
+the quote already needs. So it's correct.
+
+Fixes: 9acdcbf32542 (use single quotes in menuentry setparams command)
+Fixes: CVE-2021-20233
+
+Reported-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?h=grub-2.06&id=2f533a89a8dfcacbf2c9dbc77d910f111f24bf33]
+CVE: CVE-2021-20233
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/commands/menuentry.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/grub-core/commands/menuentry.c b/grub-core/commands/menuentry.c
+index 9164df7..720e6d8 100644
+--- a/grub-core/commands/menuentry.c
++++ b/grub-core/commands/menuentry.c
+@@ -230,7 +230,7 @@ setparams_prefix (int argc, char **args)
+ len += 3; /* 3 = 1 space + 2 quotes */
+ p = args[i];
+ while (*p)
+- len += (*p++ == '\'' ? 3 : 1);
++ len += (*p++ == '\'' ? 4 : 1);
+ }
+
+ result = grub_malloc (len + 2);
+--
+2.25.1
+
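
The arithmetic from the commit message can be checked directly: each argument carries 3 bytes of overhead (one space plus two surrounding quotes) and each embedded single quote expands to '\'' (4 bytes). A small Python illustration of that accounting (not GRUB code):

    def setparams_len(args):
        """Bytes needed to quote args the way setparams_prefix() does."""
        total = 0
        for arg in args:
            total += 3                           # 1 space + 2 surrounding quotes
            for ch in arg:
                total += 4 if ch == "'" else 1   # '\'' needs 4 bytes, not 3
        return total

    print(setparams_len(["a'b"]))   # 9 bytes for " 'a'\''b'"; the old 3-per-quote rule said 8
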
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3695.patch b/meta/recipes-bsp/grub/files/CVE-2021-3695.patch
new file mode 100644
index 0000000000..7d6e805725
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3695.patch
@@ -0,0 +1,178 @@
+From 0693d672abcf720419f86c56bda6428c540e2bb1 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 20 Jul 2022 10:01:35 +0530
+Subject: [PATCH] CVE-2021-3695
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=e623866d9286410156e8b9d2c82d6253a1b22d08]
+CVE: CVE-2021-3695
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+ video/readers/png: Drop greyscale support to fix heap out-of-bounds write
+
+A 16-bit greyscale PNG without alpha is processed in the following loop:
+
+ for (i = 0; i < (data->image_width * data->image_height);
+ i++, d1 += 4, d2 += 2)
+{
+ d1[R3] = d2[1];
+ d1[G3] = d2[1];
+ d1[B3] = d2[1];
+}
+
+The increment of d1 is wrong. d1 is incremented by 4 bytes per iteration,
+but there are only 3 bytes allocated for storage. This means that image
+data will overwrite somewhat-attacker-controlled parts of memory - 3 bytes
+out of every 4 following the end of the image.
+
+This has existed since greyscale support was added in 2013 in commit
+3ccf16dff98f (grub-core/video/readers/png.c: Support grayscale).
+
+Saving starfield.png as a 16-bit greyscale image without alpha in the gimp
+and attempting to load it causes grub-emu to crash - I don't think this code
+has ever worked.
+
+Delete all PNG greyscale support.
+
+Fixes: CVE-2021-3695
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/video/readers/png.c | 89 ++++-------------------------------
+ 1 file changed, 8 insertions(+), 81 deletions(-)
+
+diff --git a/grub-core/video/readers/png.c b/grub-core/video/readers/png.c
+index 0157ff7..db4a9d4 100644
+--- a/grub-core/video/readers/png.c
++++ b/grub-core/video/readers/png.c
+@@ -100,7 +100,7 @@ struct grub_png_data
+
+ unsigned image_width, image_height;
+ int bpp, is_16bit;
+- int raw_bytes, is_gray, is_alpha, is_palette;
++ int raw_bytes, is_alpha, is_palette;
+ int row_bytes, color_bits;
+ grub_uint8_t *image_data;
+
+@@ -280,13 +280,13 @@ grub_png_decode_image_header (struct grub_png_data *data)
+ data->bpp = 3;
+ else
+ {
+- data->is_gray = 1;
+- data->bpp = 1;
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "png: color type not supported");
+ }
+
+ if ((color_bits != 8) && (color_bits != 16)
+ && (color_bits != 4
+- || !(data->is_gray || data->is_palette)))
++ || !data->is_palette))
+ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
+ "png: bit depth must be 8 or 16");
+
+@@ -315,7 +315,7 @@ grub_png_decode_image_header (struct grub_png_data *data)
+ }
+
+ #ifndef GRUB_CPU_WORDS_BIGENDIAN
+- if (data->is_16bit || data->is_gray || data->is_palette)
++ if (data->is_16bit || data->is_palette)
+ #endif
+ {
+ data->image_data = grub_calloc (data->image_height, data->row_bytes);
+@@ -859,27 +859,8 @@ grub_png_convert_image (struct grub_png_data *data)
+ int shift;
+ int mask = (1 << data->color_bits) - 1;
+ unsigned j;
+- if (data->is_gray)
+- {
+- /* Generic formula is
+- (0xff * i) / ((1U << data->color_bits) - 1)
+- but for allowed bit depth of 1, 2 and for it's
+- equivalent to
+- (0xff / ((1U << data->color_bits) - 1)) * i
+- Precompute the multipliers to avoid division.
+- */
+-
+- const grub_uint8_t multipliers[5] = { 0xff, 0xff, 0x55, 0x24, 0x11 };
+- for (i = 0; i < (1U << data->color_bits); i++)
+- {
+- grub_uint8_t col = multipliers[data->color_bits] * i;
+- palette[i][0] = col;
+- palette[i][1] = col;
+- palette[i][2] = col;
+- }
+- }
+- else
+- grub_memcpy (palette, data->palette, 3 << data->color_bits);
++
++ grub_memcpy (palette, data->palette, 3 << data->color_bits);
+ d1c = d1;
+ d2c = d2;
+ for (j = 0; j < data->image_height; j++, d1c += data->image_width * 3,
+@@ -917,61 +898,7 @@ grub_png_convert_image (struct grub_png_data *data)
+ return;
+ }
+
+- if (data->is_gray)
+- {
+- switch (data->bpp)
+- {
+- case 4:
+- /* 16-bit gray with alpha. */
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 4)
+- {
+- d1[R4] = d2[3];
+- d1[G4] = d2[3];
+- d1[B4] = d2[3];
+- d1[A4] = d2[1];
+- }
+- break;
+- case 2:
+- if (data->is_16bit)
+- /* 16-bit gray without alpha. */
+- {
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 2)
+- {
+- d1[R3] = d2[1];
+- d1[G3] = d2[1];
+- d1[B3] = d2[1];
+- }
+- }
+- else
+- /* 8-bit gray with alpha. */
+- {
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 4, d2 += 2)
+- {
+- d1[R4] = d2[1];
+- d1[G4] = d2[1];
+- d1[B4] = d2[1];
+- d1[A4] = d2[0];
+- }
+- }
+- break;
+- /* 8-bit gray without alpha. */
+- case 1:
+- for (i = 0; i < (data->image_width * data->image_height);
+- i++, d1 += 3, d2++)
+- {
+- d1[R3] = d2[0];
+- d1[G3] = d2[0];
+- d1[B3] = d2[0];
+- }
+- break;
+- }
+- return;
+- }
+-
+- {
++ {
+ /* Only copy the upper 8 bit. */
+ #ifndef GRUB_CPU_WORDS_BIGENDIAN
+ for (i = 0; i < (data->image_width * data->image_height * data->bpp >> 1);
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3696.patch b/meta/recipes-bsp/grub/files/CVE-2021-3696.patch
new file mode 100644
index 0000000000..ef6da945c4
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3696.patch
@@ -0,0 +1,46 @@
+From b18ce59d6496a9313d75f9497a0efac61dcf4191 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 20 Jul 2022 10:05:42 +0530
+Subject: [PATCH] CVE-2021-3696
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=210245129c932dc9e1c2748d9d35524fb95b5042]
+CVE: CVE-2021-3696
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+video/readers/png: Avoid heap OOB R/W inserting huff table items
+
+In fuzzing we observed crashes where a code would attempt to be inserted
+into a huffman table before the start, leading to a set of heap OOB reads
+and writes as table entries with negative indices were shifted around and
+the new code written in.
+
+Catch the case where we would underflow the array and bail.
+
+Fixes: CVE-2021-3696
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/video/readers/png.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/grub-core/video/readers/png.c b/grub-core/video/readers/png.c
+index 36b3f10..3c05951 100644
+--- a/grub-core/video/readers/png.c
++++ b/grub-core/video/readers/png.c
+@@ -416,6 +416,13 @@ grub_png_insert_huff_item (struct huff_table *ht, int code, int len)
+ for (i = len; i < ht->max_length; i++)
+ n += ht->maxval[i];
+
++ if (n > ht->num_values)
++ {
++ grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "png: out of range inserting huffman table item");
++ return;
++ }
++
+ for (i = 0; i < n; i++)
+ ht->values[ht->num_values - i] = ht->values[ht->num_values - i - 1];
+
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3697.patch b/meta/recipes-bsp/grub/files/CVE-2021-3697.patch
new file mode 100644
index 0000000000..be15e7d1f2
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3697.patch
@@ -0,0 +1,82 @@
+From 4de9de9d14f4ac27229e45514627534e32cc4406 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 19 Jul 2022 11:13:02 +0530
+Subject: [PATCH] CVE-2021-3697
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=22a3f97d39f6a10b08ad7fd1cc47c4dcd10413f6]
+CVE: CVE-2021-3697
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+video/readers/jpeg: Block int underflow -> wild pointer write
+
+Certain 1 px wide images caused a wild pointer write in
+grub_jpeg_ycrcb_to_rgb(). This was caused because in grub_jpeg_decode_data(),
+we have the following loop:
+
+for (; data->r1 < nr1 && (!data->dri || rst);
+ data->r1++, data->bitmap_ptr += (vb * data->image_width - hb * nc1) * 3)
+
+We did not check if vb * width >= hb * nc1.
+
+On a 64-bit platform, if that turns out to be negative, it will underflow,
+be interpreted as unsigned 64-bit, then be added to the 64-bit pointer, so
+we see data->bitmap_ptr jump, e.g.:
+
+0x6180_0000_0480 to
+0x6181_0000_0498
+ ^
+ ~--- carry has occurred and this pointer is now far away from
+ any object.
+
+On a 32-bit platform, it will decrement the pointer, creating a pointer
+that won't crash but will overwrite random data.
+
+Catch the underflow and error out.
+
+Fixes: CVE-2021-3697
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/video/readers/jpeg.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/video/readers/jpeg.c b/grub-core/video/readers/jpeg.c
+index 31359a4..545a60b 100644
+--- a/grub-core/video/readers/jpeg.c
++++ b/grub-core/video/readers/jpeg.c
+@@ -23,6 +23,7 @@
+ #include <grub/mm.h>
+ #include <grub/misc.h>
+ #include <grub/bufio.h>
++#include <grub/safemath.h>
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+@@ -617,6 +618,7 @@ static grub_err_t
+ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+ {
+ unsigned c1, vb, hb, nr1, nc1;
++ unsigned stride_a, stride_b, stride;
+ int rst = data->dri;
+
+ vb = 8 << data->log_vs;
+@@ -624,8 +626,14 @@ grub_jpeg_decode_data (struct grub_jpeg_data *data)
+ nr1 = (data->image_height + vb - 1) >> (3 + data->log_vs);
+ nc1 = (data->image_width + hb - 1) >> (3 + data->log_hs);
+
++ if (grub_mul(vb, data->image_width, &stride_a) ||
++ grub_mul(hb, nc1, &stride_b) ||
++ grub_sub(stride_a, stride_b, &stride))
++ return grub_error (GRUB_ERR_BAD_FILE_TYPE,
++ "jpeg: cannot decode image with these dimensions");
++
+ for (; data->r1 < nr1 && (!data->dri || rst);
+- data->r1++, data->bitmap_ptr += (vb * data->image_width - hb * nc1) * 3)
++ data->r1++, data->bitmap_ptr += stride * 3)
+ for (c1 = 0; c1 < nc1 && (!data->dri || rst);
+ c1++, rst--, data->bitmap_ptr += hb * 3)
+ {
+--
+2.25.1
+
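The stride arithmetic can be reproduced in isolation to show why the wrap happens and how the checked helpers reject it; the grub_mul()/grub_sub() calls in the hunk above wrap the compiler overflow builtins used here (the values below are illustrative):

#include <stdio.h>

int main(void)
{
    /* illustrative values for a 1 px wide image: vb * width < hb * nc1 */
    unsigned vb = 8, width = 1, hb = 16, nc1 = 1;
    unsigned naive, stride;

    naive = vb * width - hb * nc1;            /* wraps instead of giving -8 */
    printf("naive stride: %u\n", naive);

    if (__builtin_mul_overflow(vb, width, &stride) ||
        __builtin_sub_overflow(stride, hb * nc1, &stride))
        printf("rejected: stride would underflow\n");
    else
        printf("stride: %u\n", stride);
    return 0;
}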
diff --git a/meta/recipes-bsp/grub/files/CVE-2021-3981.patch b/meta/recipes-bsp/grub/files/CVE-2021-3981.patch
new file mode 100644
index 0000000000..e27027ea65
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2021-3981.patch
@@ -0,0 +1,32 @@
+From 67740c43c9326956ea5cd6be77f813b5499a56a5 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 27 Jun 2022 10:15:29 +0530
+Subject: [PATCH] CVE-2021-3981
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/diff/util/grub-mkconfig.in?id=0adec29674561034771c13e446069b41ef41e4d4]
+CVE: CVE-2021-3981
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ util/grub-mkconfig.in | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/util/grub-mkconfig.in b/util/grub-mkconfig.in
+index 9f477ff..ead94a6 100644
+--- a/util/grub-mkconfig.in
++++ b/util/grub-mkconfig.in
+@@ -287,7 +287,11 @@ and /etc/grub.d/* files or please file a bug report with
+ exit 1
+ else
+ # none of the children aborted with error, install the new grub.cfg
+- mv -f ${grub_cfg}.new ${grub_cfg}
++ oldumask=$(umask)
++ umask 077
++ cat ${grub_cfg}.new > ${grub_cfg}
++ umask $oldumask
++ rm -f ${grub_cfg}.new
+ fi
+ fi
+
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-2601.patch b/meta/recipes-bsp/grub/files/CVE-2022-2601.patch
new file mode 100644
index 0000000000..090f693be3
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-2601.patch
@@ -0,0 +1,87 @@
+From e8060722acf0bcca037982d7fb29472363ccdfd4 Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Fri, 5 Aug 2022 01:58:27 +0800
+Subject: [PATCH] font: Fix several integer overflows in
+ grub_font_construct_glyph()
+
+This patch fixes several integer overflows in grub_font_construct_glyph().
+Glyphs of invalid size, either zero or leading to an overflow, are rejected.
+The inconsistency between "glyph" and "max_glyph_size" when grub_malloc()
+returns NULL is fixed too.
+
+Fixes: CVE-2022-2601
+
+Reported-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=768e1ef2fc159f6e14e7246e4be09363708ac39e]
+CVE: CVE-2022-2601
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/font/font.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index df17dba..f110db9 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -1509,6 +1509,7 @@ grub_font_construct_glyph (grub_font_t hinted_font,
+ struct grub_video_signed_rect bounds;
+ static struct grub_font_glyph *glyph = 0;
+ static grub_size_t max_glyph_size = 0;
++ grub_size_t cur_glyph_size;
+
+ ensure_comb_space (glyph_id);
+
+@@ -1525,29 +1526,33 @@ grub_font_construct_glyph (grub_font_t hinted_font,
+ if (!glyph_id->ncomb && !glyph_id->attributes)
+ return main_glyph;
+
+- if (max_glyph_size < sizeof (*glyph) + (bounds.width * bounds.height + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT)
++ if (grub_video_bitmap_calc_1bpp_bufsz (bounds.width, bounds.height, &cur_glyph_size) ||
++ grub_add (sizeof (*glyph), cur_glyph_size, &cur_glyph_size))
++ return main_glyph;
++
++ if (max_glyph_size < cur_glyph_size)
+ {
+ grub_free (glyph);
+- max_glyph_size = (sizeof (*glyph) + (bounds.width * bounds.height + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT) * 2;
+- if (max_glyph_size < 8)
+- max_glyph_size = 8;
+- glyph = grub_malloc (max_glyph_size);
++ if (grub_mul (cur_glyph_size, 2, &max_glyph_size))
++ max_glyph_size = 0;
++ glyph = max_glyph_size > 0 ? grub_malloc (max_glyph_size) : NULL;
+ }
+ if (!glyph)
+ {
++ max_glyph_size = 0;
+ grub_errno = GRUB_ERR_NONE;
+ return main_glyph;
+ }
+
+- grub_memset (glyph, 0, sizeof (*glyph)
+- + (bounds.width * bounds.height
+- + GRUB_CHAR_BIT - 1) / GRUB_CHAR_BIT);
++ grub_memset (glyph, 0, cur_glyph_size);
+
+ glyph->font = main_glyph->font;
+- glyph->width = bounds.width;
+- glyph->height = bounds.height;
+- glyph->offset_x = bounds.x;
+- glyph->offset_y = bounds.y;
++ if (bounds.width == 0 || bounds.height == 0 ||
++ grub_cast (bounds.width, &glyph->width) ||
++ grub_cast (bounds.height, &glyph->height) ||
++ grub_cast (bounds.x, &glyph->offset_x) ||
++ grub_cast (bounds.y, &glyph->offset_y))
++ return main_glyph;
+
+ if (glyph_id->attributes & GRUB_UNICODE_GLYPH_ATTRIBUTE_MIRROR)
+ grub_font_blit_glyph_mirror (glyph, main_glyph,
+--
+2.25.1
+
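A rough sketch of the same sizing discipline, using the compiler builtins behind GRUB's safemath macros (names and structure here are illustrative, not the GRUB API): compute the header plus 1bpp bitmap size with checked arithmetic, and keep the cached capacity consistent with the pointer when allocation fails:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct glyph { unsigned short width, height; /* 1bpp bitmap bytes follow */ };

static struct glyph *cached;
static size_t cached_cap;

static struct glyph *get_scratch_glyph(unsigned width, unsigned height)
{
    size_t pixels, need;

    /* header + (width * height + 7) / 8, every step checked for overflow */
    if (__builtin_mul_overflow((size_t) width, (size_t) height, &pixels) ||
        __builtin_add_overflow(pixels / 8 + !!(pixels % 8),
                               sizeof(struct glyph), &need))
        return NULL;

    if (cached_cap < need)
    {
        free(cached);
        /* grow with headroom; fall back to the exact size if doubling overflows */
        if (__builtin_mul_overflow(need, (size_t) 2, &cached_cap))
            cached_cap = need;
        cached = malloc(cached_cap);
        if (cached == NULL)
        {
            cached_cap = 0;   /* keep the cached capacity consistent with the pointer */
            return NULL;
        }
    }
    memset(cached, 0, need);
    return cached;
}

int main(void)
{
    printf("16x16 glyph: %s\n",
           get_scratch_glyph(16, 16) ? "allocated" : "rejected");
    return 0;
}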
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28733.patch b/meta/recipes-bsp/grub/files/CVE-2022-28733.patch
new file mode 100644
index 0000000000..6cfdf20e2d
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28733.patch
@@ -0,0 +1,60 @@
+From 415fb5eb83cbd3b5cfc25ac1290f2de4fe3d231c Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 1 Aug 2022 10:48:34 +0530
+Subject: [PATCH] CVE-2022-28733
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=3e4817538de828319ba6d59ced2fbb9b5ca13287]
+CVE: CVE-2022-28733
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+net/ip: Do IP fragment maths safely
+
+We can receive packets with invalid IP fragmentation information. This
+can lead to rsm->total_len underflowing and becoming very large.
+
+Then, in grub_netbuff_alloc(), we add to this very large number, which can
+cause it to overflow and wrap back around to a small positive number.
+The allocation then succeeds, but the resulting buffer is too small and
+subsequent operations can write past the end of the buffer.
+
+Catch the underflow here.
+
+Fixes: CVE-2022-28733
+
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/net/ip.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/net/ip.c b/grub-core/net/ip.c
+index ea5edf8..74e4e8b 100644
+--- a/grub-core/net/ip.c
++++ b/grub-core/net/ip.c
+@@ -25,6 +25,7 @@
+ #include <grub/net/netbuff.h>
+ #include <grub/mm.h>
+ #include <grub/priority_queue.h>
++#include <grub/safemath.h>
+ #include <grub/time.h>
+
+ struct iphdr {
+@@ -512,7 +513,14 @@ grub_net_recv_ip4_packets (struct grub_net_buff *nb,
+ {
+ rsm->total_len = (8 * (grub_be_to_cpu16 (iph->frags) & OFFSET_MASK)
+ + (nb->tail - nb->data));
+- rsm->total_len -= ((iph->verhdrlen & 0xf) * sizeof (grub_uint32_t));
++
++ if (grub_sub (rsm->total_len, (iph->verhdrlen & 0xf) * sizeof (grub_uint32_t),
++ &rsm->total_len))
++ {
++ grub_dprintf ("net", "IP reassembly size underflow\n");
++ return GRUB_ERR_NONE;
++ }
++
+ rsm->asm_netbuff = grub_netbuff_alloc (rsm->total_len);
+ if (!rsm->asm_netbuff)
+ {
+--
+2.25.1
+
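The same class of bug in miniature (illustrative numbers, not the GRUB code): an unchecked subtraction wraps the length to a huge value, a later addition in the allocator can wrap it back to a small buffer while the copy path still uses the huge value, and the checked subtraction rejects the packet up front:

#include <stdio.h>

int main(void)
{
    unsigned short payload = 4;          /* bytes carried by this fragment */
    unsigned hdrlen = 24;                /* claimed IP header length */
    unsigned total_len;

    total_len = payload - hdrlen;        /* wraps to a huge unsigned value */
    printf("wrapped total_len: %u\n", total_len);
    /* A later "total_len + overhead" in the allocator can wrap back to a
     * small positive size while the copy loop still uses the huge value. */

    if (__builtin_sub_overflow(payload, hdrlen, &total_len))
        printf("rejected: header longer than the fragment data\n");
    return 0;
}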
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28734.patch b/meta/recipes-bsp/grub/files/CVE-2022-28734.patch
new file mode 100644
index 0000000000..577ec10bea
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28734.patch
@@ -0,0 +1,67 @@
+From f03f09c2a07eae7f3a4646e33a406ae2689afb9e Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 1 Aug 2022 10:59:41 +0530
+Subject: [PATCH] CVE-2022-28734
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=b26b4c08e7119281ff30d0fb4a6169bd2afa8fe4]
+CVE: CVE-2022-28734
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+net/http: Fix OOB write for split http headers
+
+GRUB has special code for handling an http header that is split
+across two packets.
+
+The code tracks the end of line by looking for a "\n" byte. The
+code for split headers has always advanced the pointer just past the
+end of the line, whereas the code that handles unsplit headers does
+not advance the pointer. This extra advance causes the length to be
+one greater, which breaks an assumption in parse_line(), leading to
+it writing a NUL byte one byte past the end of the buffer where we
+reconstruct the line from the two packets.
+
+It's conceivable that an attacker controlled set of packets could
+cause this to zero out the first byte of the "next" pointer of the
+grub_mm_region structure following the current_line buffer.
+
+Do not advance the pointer in the split header case.
+
+Fixes: CVE-2022-28734
+---
+ grub-core/net/http.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/grub-core/net/http.c b/grub-core/net/http.c
+index 5aa4ad3..a220d21 100644
+--- a/grub-core/net/http.c
++++ b/grub-core/net/http.c
+@@ -68,7 +68,15 @@ parse_line (grub_file_t file, http_data_t data, char *ptr, grub_size_t len)
+ char *end = ptr + len;
+ while (end > ptr && *(end - 1) == '\r')
+ end--;
++
++ /* LF without CR. */
++ if (end == ptr + len)
++ {
++ data->errmsg = grub_strdup (_("invalid HTTP header - LF without CR"));
++ return GRUB_ERR_NONE;
++ }
+ *end = 0;
++
+ /* Trailing CRLF. */
+ if (data->in_chunk_len == 1)
+ {
+@@ -190,9 +198,7 @@ http_receive (grub_net_tcp_socket_t sock __attribute__ ((unused)),
+ int have_line = 1;
+ char *t;
+ ptr = grub_memchr (nb->data, '\n', nb->tail - nb->data);
+- if (ptr)
+- ptr++;
+- else
++ if (ptr == NULL)
+ {
+ have_line = 0;
+ ptr = (char *) nb->tail;
+--
+2.25.1
+
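The off-by-one can be shown with a hypothetical buffer reassembly (not the GRUB code): a length that counts the byte past the '\n' puts the NUL terminator one byte beyond the allocation, which is why the fix stops advancing the pointer in the split-header path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    const char pkt[] = "Header: value\nrest";
    const char *nl = memchr(pkt, '\n', sizeof(pkt) - 1);

    /* Length up to but excluding '\n' vs. length that also counts it. */
    size_t len_excl = (size_t) (nl - pkt);        /* what the unsplit path uses */
    size_t len_incl = (size_t) (nl - pkt) + 1;    /* what the split path produced */

    char *line = malloc(len_excl + 1);            /* room for the line plus NUL */
    if (line == NULL)
        return 1;
    memcpy(line, pkt, len_excl);
    line[len_excl] = '\0';                        /* in bounds */
    /* line[len_incl] = '\0' here would write one byte past the buffer. */
    printf("%s (%zu vs %zu)\n", line, len_excl, len_incl);
    free(line);
    return 0;
}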
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28735.patch b/meta/recipes-bsp/grub/files/CVE-2022-28735.patch
new file mode 100644
index 0000000000..89b653a8da
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28735.patch
@@ -0,0 +1,271 @@
+From 6fe755c5c07bb386fda58306bfd19e4a1c974c53 Mon Sep 17 00:00:00 2001
+From: Julian Andres Klode <julian.klode@canonical.com>
+Date: Thu, 2 Dec 2021 15:03:53 +0100
+Subject: kern/efi/sb: Reject non-kernel files in the shim_lock verifier
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=6fe755c5c07bb386fda58306bfd19e4a1c974c53]
+CVE: CVE-2022-28735
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+We must not allow other verifiers to pass things like the GRUB modules.
+Instead of maintaining a blocklist, maintain an allowlist of things
+that we do not care about.
+
+This allowlist really should be made reusable, and shared by the
+lockdown verifier, but this is the minimal patch addressing
+security concerns where the TPM verifier was able to mark modules
+as verified (or the OpenPGP verifier for that matter), when it
+should not do so on shim-powered secure boot systems.
+
+Fixes: CVE-2022-28735
+
+Signed-off-by: Julian Andres Klode <julian.klode@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/kern/efi/sb.c | 221 ++++++++++++++++++++++++++++++++++++++++
+ include/grub/verify.h | 1 +
+ 2 files changed, 222 insertions(+)
+ create mode 100644 grub-core/kern/efi/sb.c
+
+diff --git a/grub-core/kern/efi/sb.c b/grub-core/kern/efi/sb.c
+new file mode 100644
+index 0000000..89c4bb3
+--- /dev/null
++++ b/grub-core/kern/efi/sb.c
+@@ -0,0 +1,221 @@
++/*
++ * GRUB -- GRand Unified Bootloader
++ * Copyright (C) 2020 Free Software Foundation, Inc.
++ *
++ * GRUB is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GRUB is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * UEFI Secure Boot related checkings.
++ */
++
++#include <grub/efi/efi.h>
++#include <grub/efi/pe32.h>
++#include <grub/efi/sb.h>
++#include <grub/env.h>
++#include <grub/err.h>
++#include <grub/file.h>
++#include <grub/i386/linux.h>
++#include <grub/kernel.h>
++#include <grub/mm.h>
++#include <grub/types.h>
++#include <grub/verify.h>
++
++static grub_efi_guid_t shim_lock_guid = GRUB_EFI_SHIM_LOCK_GUID;
++
++/*
++ * Determine whether we're in secure boot mode.
++ *
++ * Please keep the logic in sync with the Linux kernel,
++ * drivers/firmware/efi/libstub/secureboot.c:efi_get_secureboot().
++ */
++grub_uint8_t
++grub_efi_get_secureboot (void)
++{
++ static grub_efi_guid_t efi_variable_guid = GRUB_EFI_GLOBAL_VARIABLE_GUID;
++ grub_efi_status_t status;
++ grub_efi_uint32_t attr = 0;
++ grub_size_t size = 0;
++ grub_uint8_t *secboot = NULL;
++ grub_uint8_t *setupmode = NULL;
++ grub_uint8_t *moksbstate = NULL;
++ grub_uint8_t secureboot = GRUB_EFI_SECUREBOOT_MODE_UNKNOWN;
++ const char *secureboot_str = "UNKNOWN";
++
++ status = grub_efi_get_variable ("SecureBoot", &efi_variable_guid,
++ &size, (void **) &secboot);
++
++ if (status == GRUB_EFI_NOT_FOUND)
++ {
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_DISABLED;
++ goto out;
++ }
++
++ if (status != GRUB_EFI_SUCCESS)
++ goto out;
++
++ status = grub_efi_get_variable ("SetupMode", &efi_variable_guid,
++ &size, (void **) &setupmode);
++
++ if (status != GRUB_EFI_SUCCESS)
++ goto out;
++
++ if ((*secboot == 0) || (*setupmode == 1))
++ {
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_DISABLED;
++ goto out;
++ }
++
++ /*
++ * See if a user has put the shim into insecure mode. If so, and if the
++ * variable doesn't have the runtime attribute set, we might as well
++ * honor that.
++ */
++ status = grub_efi_get_variable_with_attributes ("MokSBState", &shim_lock_guid,
++ &size, (void **) &moksbstate, &attr);
++
++ /* If it fails, we don't care why. Default to secure. */
++ if (status != GRUB_EFI_SUCCESS)
++ {
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_ENABLED;
++ goto out;
++ }
++
++ if (!(attr & GRUB_EFI_VARIABLE_RUNTIME_ACCESS) && *moksbstate == 1)
++ {
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_DISABLED;
++ goto out;
++ }
++
++ secureboot = GRUB_EFI_SECUREBOOT_MODE_ENABLED;
++
++ out:
++ grub_free (moksbstate);
++ grub_free (setupmode);
++ grub_free (secboot);
++
++ if (secureboot == GRUB_EFI_SECUREBOOT_MODE_DISABLED)
++ secureboot_str = "Disabled";
++ else if (secureboot == GRUB_EFI_SECUREBOOT_MODE_ENABLED)
++ secureboot_str = "Enabled";
++
++ grub_dprintf ("efi", "UEFI Secure Boot state: %s\n", secureboot_str);
++
++ return secureboot;
++}
++
++static grub_err_t
++shim_lock_verifier_init (grub_file_t io __attribute__ ((unused)),
++ enum grub_file_type type,
++ void **context __attribute__ ((unused)),
++ enum grub_verify_flags *flags)
++{
++ *flags = GRUB_VERIFY_FLAGS_NONE;
++
++ switch (type & GRUB_FILE_TYPE_MASK)
++ {
++ /* Files we check. */
++ case GRUB_FILE_TYPE_LINUX_KERNEL:
++ case GRUB_FILE_TYPE_MULTIBOOT_KERNEL:
++ case GRUB_FILE_TYPE_BSD_KERNEL:
++ case GRUB_FILE_TYPE_XNU_KERNEL:
++ case GRUB_FILE_TYPE_PLAN9_KERNEL:
++ case GRUB_FILE_TYPE_EFI_CHAINLOADED_IMAGE:
++ *flags = GRUB_VERIFY_FLAGS_SINGLE_CHUNK;
++ return GRUB_ERR_NONE;
++
++ /* Files that do not affect secureboot state. */
++ case GRUB_FILE_TYPE_NONE:
++ case GRUB_FILE_TYPE_LOOPBACK:
++ case GRUB_FILE_TYPE_LINUX_INITRD:
++ case GRUB_FILE_TYPE_OPENBSD_RAMDISK:
++ case GRUB_FILE_TYPE_XNU_RAMDISK:
++ case GRUB_FILE_TYPE_SIGNATURE:
++ case GRUB_FILE_TYPE_PUBLIC_KEY:
++ case GRUB_FILE_TYPE_PUBLIC_KEY_TRUST:
++ case GRUB_FILE_TYPE_PRINT_BLOCKLIST:
++ case GRUB_FILE_TYPE_TESTLOAD:
++ case GRUB_FILE_TYPE_GET_SIZE:
++ case GRUB_FILE_TYPE_FONT:
++ case GRUB_FILE_TYPE_ZFS_ENCRYPTION_KEY:
++ case GRUB_FILE_TYPE_CAT:
++ case GRUB_FILE_TYPE_HEXCAT:
++ case GRUB_FILE_TYPE_CMP:
++ case GRUB_FILE_TYPE_HASHLIST:
++ case GRUB_FILE_TYPE_TO_HASH:
++ case GRUB_FILE_TYPE_KEYBOARD_LAYOUT:
++ case GRUB_FILE_TYPE_PIXMAP:
++ case GRUB_FILE_TYPE_GRUB_MODULE_LIST:
++ case GRUB_FILE_TYPE_CONFIG:
++ case GRUB_FILE_TYPE_THEME:
++ case GRUB_FILE_TYPE_GETTEXT_CATALOG:
++ case GRUB_FILE_TYPE_FS_SEARCH:
++ case GRUB_FILE_TYPE_LOADENV:
++ case GRUB_FILE_TYPE_SAVEENV:
++ case GRUB_FILE_TYPE_VERIFY_SIGNATURE:
++ *flags = GRUB_VERIFY_FLAGS_SKIP_VERIFICATION;
++ return GRUB_ERR_NONE;
++
++ /* Other files. */
++ default:
++ return grub_error (GRUB_ERR_ACCESS_DENIED, N_("prohibited by secure boot policy"));
++ }
++}
++
++static grub_err_t
++shim_lock_verifier_write (void *context __attribute__ ((unused)), void *buf, grub_size_t size)
++{
++ grub_efi_shim_lock_protocol_t *sl = grub_efi_locate_protocol (&shim_lock_guid, 0);
++
++ if (!sl)
++ return grub_error (GRUB_ERR_ACCESS_DENIED, N_("shim_lock protocol not found"));
++
++ if (sl->verify (buf, size) != GRUB_EFI_SUCCESS)
++ return grub_error (GRUB_ERR_BAD_SIGNATURE, N_("bad shim signature"));
++
++ return GRUB_ERR_NONE;
++}
++
++struct grub_file_verifier shim_lock_verifier =
++ {
++ .name = "shim_lock_verifier",
++ .init = shim_lock_verifier_init,
++ .write = shim_lock_verifier_write
++ };
++
++void
++grub_shim_lock_verifier_setup (void)
++{
++ struct grub_module_header *header;
++ grub_efi_shim_lock_protocol_t *sl =
++ grub_efi_locate_protocol (&shim_lock_guid, 0);
++
++ /* shim_lock is missing, check if GRUB image is built with --disable-shim-lock. */
++ if (!sl)
++ {
++ FOR_MODULES (header)
++ {
++ if (header->type == OBJ_TYPE_DISABLE_SHIM_LOCK)
++ return;
++ }
++ }
++
++ /* Secure Boot is off. Do not load shim_lock. */
++ if (grub_efi_get_secureboot () != GRUB_EFI_SECUREBOOT_MODE_ENABLED)
++ return;
++
++ /* Enforce shim_lock_verifier. */
++ grub_verifier_register (&shim_lock_verifier);
++
++ grub_env_set ("shim_lock", "y");
++ grub_env_export ("shim_lock");
++}
+diff --git a/include/grub/verify.h b/include/grub/verify.h
+index cd129c3..672ae16 100644
+--- a/include/grub/verify.h
++++ b/include/grub/verify.h
+@@ -24,6 +24,7 @@
+
+ enum grub_verify_flags
+ {
++ GRUB_VERIFY_FLAGS_NONE = 0,
+ GRUB_VERIFY_FLAGS_SKIP_VERIFICATION = 1,
+ GRUB_VERIFY_FLAGS_SINGLE_CHUNK = 2,
+ /* Defer verification to another authority. */
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-28736.patch b/meta/recipes-bsp/grub/files/CVE-2022-28736.patch
new file mode 100644
index 0000000000..4fc9fdaf05
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-28736.patch
@@ -0,0 +1,275 @@
+From 431a111c60095fc973d83fe9209f26f29ce78784 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 1 Aug 2022 11:17:17 +0530
+Subject: [PATCH] CVE-2022-28736
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=04c86e0bb7b58fc2f913f798cdb18934933e532d]
+CVE: CVE-2022-28736
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+loader/efi/chainloader: Use grub_loader_set_ex()
+
+This ports the EFI chainloader to use grub_loader_set_ex() in order to fix
+a use-after-free bug that occurs when grub_cmd_chainloader() is executed
+more than once before a boot attempt is performed.
+
+Fixes: CVE-2022-28736
+
+Signed-off-by: Chris Coulson <chris.coulson@canonical.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+---
+ grub-core/commands/boot.c | 66 ++++++++++++++++++++++++++----
+ grub-core/loader/efi/chainloader.c | 46 +++++++++++----------
+ include/grub/loader.h | 5 +++
+ 3 files changed, 87 insertions(+), 30 deletions(-)
+
+diff --git a/grub-core/commands/boot.c b/grub-core/commands/boot.c
+index bbca81e..6151478 100644
+--- a/grub-core/commands/boot.c
++++ b/grub-core/commands/boot.c
+@@ -27,10 +27,20 @@
+
+ GRUB_MOD_LICENSE ("GPLv3+");
+
+-static grub_err_t (*grub_loader_boot_func) (void);
+-static grub_err_t (*grub_loader_unload_func) (void);
++static grub_err_t (*grub_loader_boot_func) (void *context);
++static grub_err_t (*grub_loader_unload_func) (void *context);
++static void *grub_loader_context;
+ static int grub_loader_flags;
+
++struct grub_simple_loader_hooks
++{
++ grub_err_t (*boot) (void);
++ grub_err_t (*unload) (void);
++};
++
++/* Don't heap allocate this to avoid making grub_loader_set() fallible. */
++static struct grub_simple_loader_hooks simple_loader_hooks;
++
+ struct grub_preboot
+ {
+ grub_err_t (*preboot_func) (int);
+@@ -44,6 +54,29 @@ static int grub_loader_loaded;
+ static struct grub_preboot *preboots_head = 0,
+ *preboots_tail = 0;
+
++static grub_err_t
++grub_simple_boot_hook (void *context)
++{
++ struct grub_simple_loader_hooks *hooks;
++
++ hooks = (struct grub_simple_loader_hooks *) context;
++ return hooks->boot ();
++}
++
++static grub_err_t
++grub_simple_unload_hook (void *context)
++{
++ struct grub_simple_loader_hooks *hooks;
++ grub_err_t ret;
++
++ hooks = (struct grub_simple_loader_hooks *) context;
++
++ ret = hooks->unload ();
++ grub_memset (hooks, 0, sizeof (*hooks));
++
++ return ret;
++}
++
+ int
+ grub_loader_is_loaded (void)
+ {
+@@ -110,28 +143,45 @@ grub_loader_unregister_preboot_hook (struct grub_preboot *hnd)
+ }
+
+ void
+-grub_loader_set (grub_err_t (*boot) (void),
+- grub_err_t (*unload) (void),
+- int flags)
++grub_loader_set_ex (grub_err_t (*boot) (void *context),
++ grub_err_t (*unload) (void *context),
++ void *context,
++ int flags)
+ {
+ if (grub_loader_loaded && grub_loader_unload_func)
+- grub_loader_unload_func ();
++ grub_loader_unload_func (grub_loader_context);
+
+ grub_loader_boot_func = boot;
+ grub_loader_unload_func = unload;
++ grub_loader_context = context;
+ grub_loader_flags = flags;
+
+ grub_loader_loaded = 1;
+ }
+
++void
++grub_loader_set (grub_err_t (*boot) (void),
++ grub_err_t (*unload) (void),
++ int flags)
++{
++ grub_loader_set_ex (grub_simple_boot_hook,
++ grub_simple_unload_hook,
++ &simple_loader_hooks,
++ flags);
++
++ simple_loader_hooks.boot = boot;
++ simple_loader_hooks.unload = unload;
++}
++
+ void
+ grub_loader_unset(void)
+ {
+ if (grub_loader_loaded && grub_loader_unload_func)
+- grub_loader_unload_func ();
++ grub_loader_unload_func (grub_loader_context);
+
+ grub_loader_boot_func = 0;
+ grub_loader_unload_func = 0;
++ grub_loader_context = 0;
+
+ grub_loader_loaded = 0;
+ }
+@@ -158,7 +208,7 @@ grub_loader_boot (void)
+ return err;
+ }
+ }
+- err = (grub_loader_boot_func) ();
++ err = (grub_loader_boot_func) (grub_loader_context);
+
+ for (cur = preboots_tail; cur; cur = cur->prev)
+ if (! err)
+diff --git a/grub-core/loader/efi/chainloader.c b/grub-core/loader/efi/chainloader.c
+index a8d7b91..93a028a 100644
+--- a/grub-core/loader/efi/chainloader.c
++++ b/grub-core/loader/efi/chainloader.c
+@@ -44,33 +44,28 @@ GRUB_MOD_LICENSE ("GPLv3+");
+
+ static grub_dl_t my_mod;
+
+-static grub_efi_physical_address_t address;
+-static grub_efi_uintn_t pages;
+-static grub_efi_device_path_t *file_path;
+-static grub_efi_handle_t image_handle;
+-static grub_efi_char16_t *cmdline;
+-
+ static grub_err_t
+-grub_chainloader_unload (void)
++grub_chainloader_unload (void *context)
+ {
++ grub_efi_handle_t image_handle = (grub_efi_handle_t) context;
++ grub_efi_loaded_image_t *loaded_image;
+ grub_efi_boot_services_t *b;
+
++ loaded_image = grub_efi_get_loaded_image (image_handle);
++ if (loaded_image != NULL)
++ grub_free (loaded_image->load_options);
++
+ b = grub_efi_system_table->boot_services;
+ efi_call_1 (b->unload_image, image_handle);
+- efi_call_2 (b->free_pages, address, pages);
+-
+- grub_free (file_path);
+- grub_free (cmdline);
+- cmdline = 0;
+- file_path = 0;
+
+ grub_dl_unref (my_mod);
+ return GRUB_ERR_NONE;
+ }
+
+ static grub_err_t
+-grub_chainloader_boot (void)
++grub_chainloader_boot (void *context)
+ {
++ grub_efi_handle_t image_handle = (grub_efi_handle_t) context;
+ grub_efi_boot_services_t *b;
+ grub_efi_status_t status;
+ grub_efi_uintn_t exit_data_size;
+@@ -139,7 +134,7 @@ make_file_path (grub_efi_device_path_t *dp, const char *filename)
+ char *dir_start;
+ char *dir_end;
+ grub_size_t size;
+- grub_efi_device_path_t *d;
++ grub_efi_device_path_t *d, *file_path;
+
+ dir_start = grub_strchr (filename, ')');
+ if (! dir_start)
+@@ -215,11 +210,15 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ grub_efi_status_t status;
+ grub_efi_boot_services_t *b;
+ grub_device_t dev = 0;
+- grub_efi_device_path_t *dp = 0;
++ grub_efi_device_path_t *dp = NULL, *file_path = NULL;
+ grub_efi_loaded_image_t *loaded_image;
+ char *filename;
+ void *boot_image = 0;
+ grub_efi_handle_t dev_handle = 0;
++ grub_efi_physical_address_t address = 0;
++ grub_efi_uintn_t pages = 0;
++ grub_efi_char16_t *cmdline = NULL;
++ grub_efi_handle_t image_handle = NULL;
+
+ if (argc == 0)
+ return grub_error (GRUB_ERR_BAD_ARGUMENT, N_("filename expected"));
+@@ -227,11 +226,6 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+
+ grub_dl_ref (my_mod);
+
+- /* Initialize some global variables. */
+- address = 0;
+- image_handle = 0;
+- file_path = 0;
+-
+ b = grub_efi_system_table->boot_services;
+
+ file = grub_file_open (filename, GRUB_FILE_TYPE_EFI_CHAINLOADED_IMAGE);
+@@ -401,7 +395,11 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ grub_file_close (file);
+ grub_device_close (dev);
+
+- grub_loader_set (grub_chainloader_boot, grub_chainloader_unload, 0);
++ /* We're finished with the source image buffer and file path now. */
++ efi_call_2 (b->free_pages, address, pages);
++ grub_free (file_path);
++
++ grub_loader_set_ex (grub_chainloader_boot, grub_chainloader_unload, image_handle, 0);
+ return 0;
+
+ fail:
+@@ -412,11 +410,15 @@ grub_cmd_chainloader (grub_command_t cmd __attribute__ ((unused)),
+ if (file)
+ grub_file_close (file);
+
++ grub_free (cmdline);
+ grub_free (file_path);
+
+ if (address)
+ efi_call_2 (b->free_pages, address, pages);
+
++ if (image_handle != NULL)
++ efi_call_1 (b->unload_image, image_handle);
++
+ grub_dl_unref (my_mod);
+
+ return grub_errno;
+diff --git a/include/grub/loader.h b/include/grub/loader.h
+index 7f82a49..3071a50 100644
+--- a/include/grub/loader.h
++++ b/include/grub/loader.h
+@@ -39,6 +39,11 @@ void EXPORT_FUNC (grub_loader_set) (grub_err_t (*boot) (void),
+ grub_err_t (*unload) (void),
+ int flags);
+
++void EXPORT_FUNC (grub_loader_set_ex) (grub_err_t (*boot) (void *context),
++ grub_err_t (*unload) (void *context),
++ void *context,
++ int flags);
++
+ /* Unset current loader, if any. */
+ void EXPORT_FUNC (grub_loader_unset) (void);
+
+--
+2.25.1
+
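A simplified mirror of the context-passing registration (hypothetical names, not the GRUB implementation): handing each image's state to its hooks, and unloading the previous image when a new one is registered, removes the stale file-scope globals that caused the use-after-free:

#include <stdio.h>
#include <stdlib.h>

typedef int (*boot_fn)(void *context);
typedef int (*unload_fn)(void *context);

struct loader {
    boot_fn boot;
    unload_fn unload;
    void *context;
};

static struct loader current;

static void loader_set_ex(boot_fn boot, unload_fn unload, void *context)
{
    /* Unload the previous image (and its context) before replacing it, so a
     * second "chainloader" invocation cannot leave a dangling handle behind. */
    if (current.unload != NULL)
        current.unload(current.context);
    current = (struct loader){ boot, unload, context };
}

struct image { const char *name; };

static int image_boot(void *context)
{
    printf("booting %s\n", ((struct image *) context)->name);
    return 0;
}

static int image_unload(void *context)
{
    printf("unloading %s\n", ((struct image *) context)->name);
    free(context);
    return 0;
}

int main(void)
{
    struct image *a = malloc(sizeof(*a));
    struct image *b = malloc(sizeof(*b));
    if (a == NULL || b == NULL)
        return 1;
    a->name = "first.efi";
    b->name = "second.efi";

    loader_set_ex(image_boot, image_unload, a);
    loader_set_ex(image_boot, image_unload, b);  /* first image is unloaded here */
    current.boot(current.context);
    current.unload(current.context);
    return 0;
}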
diff --git a/meta/recipes-bsp/grub/files/CVE-2022-3775.patch b/meta/recipes-bsp/grub/files/CVE-2022-3775.patch
new file mode 100644
index 0000000000..e2e3f35584
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2022-3775.patch
@@ -0,0 +1,97 @@
+From fdbe7209152ad6f09a1166f64f162017f2145ba3 Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Mon, 24 Oct 2022 08:05:35 +0800
+Subject: [PATCH] font: Fix an integer underflow in blit_comb()
+
+The expression (ctx.bounds.height - combining_glyphs[i]->height) / 2 may
+evaluate to a very big invalid value even if both ctx.bounds.height and
+combining_glyphs[i]->height are small integers. For example, if
+ctx.bounds.height is 10 and combining_glyphs[i]->height is 12, this
+expression evaluates to 2147483647 (expected -1). This is because
+coordinates are allowed to be negative but ctx.bounds.height is an
+unsigned int. So, the subtraction operates on unsigned ints and
+underflows to a very big value. The division makes things even worse.
+The quotient is still an invalid value even if converted back to int.
+
+This patch fixes the problem by casting ctx.bounds.height to int. As
+a result the subtraction will operate on int and grub_uint16_t which
+will be promoted to an int. So, the underflow will no longer happen. Other
+uses of ctx.bounds.height (and ctx.bounds.width) are also cast to int,
+to ensure coordinates are always calculated on signed integers.
+
+Fixes: CVE-2022-3775
+
+Reported-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=992c06191babc1e109caf40d6a07ec6fdef427af]
+CVE: CVE-2022-3775
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/font/font.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index f110db9..3b76b22 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -1200,12 +1200,12 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ ctx.bounds.height = main_glyph->height;
+
+ above_rightx = main_glyph->offset_x + main_glyph->width;
+- above_righty = ctx.bounds.y + ctx.bounds.height;
++ above_righty = ctx.bounds.y + (int) ctx.bounds.height;
+
+ above_leftx = main_glyph->offset_x;
+- above_lefty = ctx.bounds.y + ctx.bounds.height;
++ above_lefty = ctx.bounds.y + (int) ctx.bounds.height;
+
+- below_rightx = ctx.bounds.x + ctx.bounds.width;
++ below_rightx = ctx.bounds.x + (int) ctx.bounds.width;
+ below_righty = ctx.bounds.y;
+
+ comb = grub_unicode_get_comb (glyph_id);
+@@ -1218,7 +1218,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+
+ if (!combining_glyphs[i])
+ continue;
+- targetx = (ctx.bounds.width - combining_glyphs[i]->width) / 2 + ctx.bounds.x;
++ targetx = ((int) ctx.bounds.width - combining_glyphs[i]->width) / 2 + ctx.bounds.x;
+ /* CGJ is to avoid diacritics reordering. */
+ if (comb[i].code
+ == GRUB_UNICODE_COMBINING_GRAPHEME_JOINER)
+@@ -1228,8 +1228,8 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ case GRUB_UNICODE_COMB_OVERLAY:
+ do_blit (combining_glyphs[i],
+ targetx,
+- (ctx.bounds.height - combining_glyphs[i]->height) / 2
+- - (ctx.bounds.height + ctx.bounds.y), &ctx);
++ ((int) ctx.bounds.height - combining_glyphs[i]->height) / 2
++ - ((int) ctx.bounds.height + ctx.bounds.y), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
+ break;
+@@ -1302,7 +1302,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+ /* Fallthrough. */
+ case GRUB_UNICODE_STACK_ATTACHED_ABOVE:
+ do_blit (combining_glyphs[i], targetx,
+- -(ctx.bounds.height + ctx.bounds.y + space
++ -((int) ctx.bounds.height + ctx.bounds.y + space
+ + combining_glyphs[i]->height), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
+@@ -1310,7 +1310,7 @@ blit_comb (const struct grub_unicode_glyph *glyph_id,
+
+ case GRUB_UNICODE_COMB_HEBREW_DAGESH:
+ do_blit (combining_glyphs[i], targetx,
+- -(ctx.bounds.height / 2 + ctx.bounds.y
++ -((int) ctx.bounds.height / 2 + ctx.bounds.y
+ + combining_glyphs[i]->height / 2), &ctx);
+ if (min_devwidth < combining_glyphs[i]->width)
+ min_devwidth = combining_glyphs[i]->width;
+--
+2.25.1
+
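The promotion rule behind the bug can be checked in isolation; with the commit's example values the unsigned expression yields 2147483647 while the cast version yields the expected -1:

#include <stdio.h>

int main(void)
{
    unsigned bounds_height = 10;   /* like ctx.bounds.height (unsigned int) */
    short glyph_height = 12;       /* like a combining glyph's height */

    /* unsigned - signed: the signed operand converts to unsigned and wraps */
    printf("unsigned arithmetic: %u\n", (bounds_height - glyph_height) / 2);

    /* casting the unsigned operand to int keeps the whole expression signed */
    printf("signed arithmetic:   %d\n", ((int) bounds_height - glyph_height) / 2);
    return 0;
}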
diff --git a/meta/recipes-bsp/grub/files/CVE-2023-4692.patch b/meta/recipes-bsp/grub/files/CVE-2023-4692.patch
new file mode 100644
index 0000000000..0e74870ebf
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2023-4692.patch
@@ -0,0 +1,97 @@
+From 43651027d24e62a7a463254165e1e46e42aecdea Mon Sep 17 00:00:00 2001
+From: Maxim Suhanov <dfirblog@gmail.com>
+Date: Mon, 28 Aug 2023 16:31:57 +0300
+Subject: [PATCH] fs/ntfs: Fix an OOB write when parsing the $ATTRIBUTE_LIST
+ attribute for the $MFT file
+
+When parsing an extremely fragmented $MFT file, i.e., the file described
+using the $ATTRIBUTE_LIST attribute, current NTFS code will reuse a buffer
+containing bytes read from the underlying drive to store sector numbers,
+which are consumed later to read data from these sectors into another buffer.
+
+These sector numbers, two 32-bit integers, are always stored at predefined
+offsets, 0x10 and 0x14, relative to the first byte of the selected entry
+within the $ATTRIBUTE_LIST attribute. Usually, this won't cause any problem.
+
+However, when parsing a specially-crafted file system image, this may cause
+the NTFS code to write these integers beyond the buffer boundary, likely
+causing the GRUB memory allocator to misbehave or fail. These integers contain
+values which are controlled by on-disk structures of the NTFS file system.
+
+Such a modification and the resulting misbehavior may touch a memory range
+not assigned to GRUB but owned by firmware or another EFI application/driver.
+
+This fix introduces checks to ensure that these sector numbers are never
+written beyond the boundary.
+
+Fixes: CVE-2023-4692
+
+Reported-by: Maxim Suhanov <dfirblog@gmail.com>
+Signed-off-by: Maxim Suhanov <dfirblog@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=43651027d24e62a7a463254165e1e46e42aecdea]
+CVE: CVE-2023-4692
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/fs/ntfs.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c
+index 2f34f76..c8d3683 100644
+--- a/grub-core/fs/ntfs.c
++++ b/grub-core/fs/ntfs.c
+@@ -184,7 +184,7 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ }
+ if (at->attr_end)
+ {
+- grub_uint8_t *pa;
++ grub_uint8_t *pa, *pa_end;
+
+ at->emft_buf = grub_malloc (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR);
+ if (at->emft_buf == NULL)
+@@ -209,11 +209,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ }
+ at->attr_nxt = at->edat_buf;
+ at->attr_end = at->edat_buf + u32at (pa, 0x30);
++ pa_end = at->edat_buf + n;
+ }
+ else
+ {
+ at->attr_nxt = at->attr_end + u16at (pa, 0x14);
+ at->attr_end = at->attr_end + u32at (pa, 4);
++ pa_end = at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR);
+ }
+ at->flags |= GRUB_NTFS_AF_ALST;
+ while (at->attr_nxt < at->attr_end)
+@@ -230,6 +232,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ at->flags |= GRUB_NTFS_AF_GPOS;
+ at->attr_cur = at->attr_nxt;
+ pa = at->attr_cur;
++
++ if ((pa >= pa_end) || (pa_end - pa < 0x18))
++ {
++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse attribute list");
++ return NULL;
++ }
++
+ grub_set_unaligned32 ((char *) pa + 0x10,
+ grub_cpu_to_le32 (at->mft->data->mft_start));
+ grub_set_unaligned32 ((char *) pa + 0x14,
+@@ -240,6 +249,13 @@ find_attr (struct grub_ntfs_attr *at, grub_uint8_t attr)
+ {
+ if (*pa != attr)
+ break;
++
++ if ((pa >= pa_end) || (pa_end - pa < 0x18))
++ {
++ grub_error (GRUB_ERR_BAD_FS, "can\'t parse attribute list");
++ return NULL;
++ }
++
+ if (read_attr
+ (at, pa + 0x10,
+ u32at (pa, 0x10) * (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR),
+--
+2.25.1
+
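A sketch of the same guard with illustrative offsets (host byte order and plain memcpy, rather than GRUB's unaligned little-endian stores): refuse to patch an entry unless at least 0x18 bytes remain before the end of the backing buffer:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Write two 32-bit values at offsets 0x10 and 0x14 of the current entry,
 * but only if the entry fits inside the backing buffer. */
static int patch_entry(uint8_t *entry, uint8_t *buf_end,
                       uint32_t start, uint32_t count)
{
    if (entry >= buf_end || (size_t) (buf_end - entry) < 0x18)
        return -1;                       /* entry extends past the buffer */
    memcpy(entry + 0x10, &start, sizeof(start));
    memcpy(entry + 0x14, &count, sizeof(count));
    return 0;
}

int main(void)
{
    uint8_t record[64] = {0};

    printf("%d\n", patch_entry(record, record + sizeof(record), 1, 2));       /* ok */
    printf("%d\n", patch_entry(record + 60, record + sizeof(record), 1, 2));  /* rejected */
    return 0;
}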
diff --git a/meta/recipes-bsp/grub/files/CVE-2023-4693.patch b/meta/recipes-bsp/grub/files/CVE-2023-4693.patch
new file mode 100644
index 0000000000..1e6b6efdec
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/CVE-2023-4693.patch
@@ -0,0 +1,62 @@
+From 0ed2458cc4eff6d9a9199527e2a0b6d445802f94 Mon Sep 17 00:00:00 2001
+From: Maxim Suhanov <dfirblog@gmail.com>
+Date: Mon, 28 Aug 2023 16:32:33 +0300
+Subject: [PATCH] fs/ntfs: Fix an OOB read when reading data from the resident
+ $DATA attribute
+
+When reading a file containing resident data, i.e., the file data is stored in
+the $DATA attribute within the NTFS file record, not in external clusters,
+there are no checks that this resident data actually fits the corresponding
+file record segment.
+
+When parsing a specially-crafted file system image, the current NTFS code will
+read the file data from an arbitrary, attacker-chosen memory offset and of
+arbitrary, attacker-chosen length.
+
+This allows an attacker to display arbitrary chunks of memory, which could
+contain sensitive information like password hashes or even plain-text,
+obfuscated passwords from BS EFI variables.
+
+This fix implements a check to ensure that resident data is read from the
+corresponding file record segment only.
+
+Fixes: CVE-2023-4693
+
+Reported-by: Maxim Suhanov <dfirblog@gmail.com>
+Signed-off-by: Maxim Suhanov <dfirblog@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/gitweb/?p=grub.git;a=commit;h=0ed2458cc4eff6d9a9199527e2a0b6d445802f94]
+CVE: CVE-2023-4693
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/fs/ntfs.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/grub-core/fs/ntfs.c b/grub-core/fs/ntfs.c
+index c8d3683..4d1fe42 100644
+--- a/grub-core/fs/ntfs.c
++++ b/grub-core/fs/ntfs.c
+@@ -401,7 +401,18 @@ read_data (struct grub_ntfs_attr *at, grub_uint8_t *pa, grub_uint8_t *dest,
+ {
+ if (ofs + len > u32at (pa, 0x10))
+ return grub_error (GRUB_ERR_BAD_FS, "read out of range");
+- grub_memcpy (dest, pa + u32at (pa, 0x14) + ofs, len);
++
++ if (u32at (pa, 0x10) > (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR))
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute too large");
++
++ if (pa >= at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR))
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range");
++
++ if (u16at (pa, 0x14) + u32at (pa, 0x10) >
++ (grub_addr_t) at->mft->buf + (at->mft->data->mft_size << GRUB_NTFS_BLK_SHR) - (grub_addr_t) pa)
++ return grub_error (GRUB_ERR_BAD_FS, "resident attribute out of range");
++
++ grub_memcpy (dest, pa + u16at (pa, 0x14) + ofs, len);
+ return 0;
+ }
+
+--
+2.25.1
+
diff --git a/meta/recipes-bsp/grub/files/determinism.patch b/meta/recipes-bsp/grub/files/determinism.patch
index 3c1f562c71..bd4e7188ec 100644
--- a/meta/recipes-bsp/grub/files/determinism.patch
+++ b/meta/recipes-bsp/grub/files/determinism.patch
@@ -11,7 +11,7 @@ missing sorting of the list used to generate it. Add such a sort.
Also ensure the generated unidata.c file is deterministic by sorting the
keys of the dict.
-Upstream-Status: Pending
+Upstream-Status: Submitted [https://lists.gnu.org/archive/html/grub-devel/2023-06/index.html]
Richard Purdie <richard.purdie@linuxfoundation.org>
Index: grub-2.04/grub-core/genmoddep.awk
diff --git a/meta/recipes-bsp/grub/files/font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch b/meta/recipes-bsp/grub/files/font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch
new file mode 100644
index 0000000000..d4ba3cafc5
--- /dev/null
+++ b/meta/recipes-bsp/grub/files/font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch
@@ -0,0 +1,117 @@
+From 1f511ae054fe42dce7aedfbfe0f234fa1e0a7a3e Mon Sep 17 00:00:00 2001
+From: Zhang Boyang <zhangboyang.id@gmail.com>
+Date: Fri, 5 Aug 2022 00:51:20 +0800
+Subject: [PATCH] font: Fix size overflow in grub_font_get_glyph_internal()
+
+The length of memory allocation and file read may overflow. This patch
+fixes the problem by using safemath macros.
+
+There is a lot of code repetition like "(x * y + 7) / 8". It is unsafe
+if overflow happens. This patch introduces grub_video_bitmap_calc_1bpp_bufsz().
+It is a safe replacement for such code. It has a safemath-like prototype.
+
+This patch also introduces grub_cast(value, pointer), which casts value to
+typeof(*pointer) and then stores the value to *pointer. It returns true when
+overflow occurs or false if there is no overflow. The semantics of arguments
+and return value are designed to be consistent with other safemath macros.
+
+Signed-off-by: Zhang Boyang <zhangboyang.id@gmail.com>
+Reviewed-by: Daniel Kiper <daniel.kiper@oracle.com>
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/grub.git/commit/?id=9c76ec09ae08155df27cd237eaea150b4f02f532]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ grub-core/font/font.c | 17 +++++++++++++----
+ include/grub/bitmap.h | 18 ++++++++++++++++++
+ include/grub/safemath.h | 2 ++
+ 3 files changed, 33 insertions(+), 4 deletions(-)
+
+diff --git a/grub-core/font/font.c b/grub-core/font/font.c
+index 5edb477..df17dba 100644
+--- a/grub-core/font/font.c
++++ b/grub-core/font/font.c
+@@ -733,7 +733,8 @@ grub_font_get_glyph_internal (grub_font_t font, grub_uint32_t code)
+ grub_int16_t xoff;
+ grub_int16_t yoff;
+ grub_int16_t dwidth;
+- int len;
++ grub_ssize_t len;
++ grub_size_t sz;
+
+ if (index_entry->glyph)
+ /* Return cached glyph. */
+@@ -760,9 +761,17 @@ grub_font_get_glyph_internal (grub_font_t font, grub_uint32_t code)
+ return 0;
+ }
+
+- len = (width * height + 7) / 8;
+- glyph = grub_malloc (sizeof (struct grub_font_glyph) + len);
+- if (!glyph)
++ /* Calculate real struct size of current glyph. */
++ if (grub_video_bitmap_calc_1bpp_bufsz (width, height, &len) ||
++ grub_add (sizeof (struct grub_font_glyph), len, &sz))
++ {
++ remove_font (font);
++ return 0;
++ }
++
++ /* Allocate and initialize the glyph struct. */
++ glyph = grub_malloc (sz);
++ if (glyph == NULL)
+ {
+ remove_font (font);
+ return 0;
+diff --git a/include/grub/bitmap.h b/include/grub/bitmap.h
+index 5728f8c..0d9603f 100644
+--- a/include/grub/bitmap.h
++++ b/include/grub/bitmap.h
+@@ -23,6 +23,7 @@
+ #include <grub/symbol.h>
+ #include <grub/types.h>
+ #include <grub/video.h>
++#include <grub/safemath.h>
+
+ struct grub_video_bitmap
+ {
+@@ -79,6 +80,23 @@ grub_video_bitmap_get_height (struct grub_video_bitmap *bitmap)
+ return bitmap->mode_info.height;
+ }
+
++/*
++ * Calculate and store the size of data buffer of 1bit bitmap in result.
++ * Equivalent to "*result = (width * height + 7) / 8" if no overflow occurs.
++ * Return true when overflow occurs or false if there is no overflow.
++ * This function is intentionally implemented as a macro instead of
++ * an inline function. Although a bit awkward, it preserves data types for
++ * safemath macros and reduces macro side effects as much as possible.
++ *
++ * XXX: Will report false overflow if width * height > UINT64_MAX.
++ */
++#define grub_video_bitmap_calc_1bpp_bufsz(width, height, result) \
++({ \
++ grub_uint64_t _bitmap_pixels; \
++ grub_mul ((width), (height), &_bitmap_pixels) ? 1 : \
++ grub_cast (_bitmap_pixels / GRUB_CHAR_BIT + !!(_bitmap_pixels % GRUB_CHAR_BIT), (result)); \
++})
++
+ void EXPORT_FUNC (grub_video_bitmap_get_mode_info) (struct grub_video_bitmap *bitmap,
+ struct grub_video_mode_info *mode_info);
+
+diff --git a/include/grub/safemath.h b/include/grub/safemath.h
+index c17b89b..bb0f826 100644
+--- a/include/grub/safemath.h
++++ b/include/grub/safemath.h
+@@ -30,6 +30,8 @@
+ #define grub_sub(a, b, res) __builtin_sub_overflow(a, b, res)
+ #define grub_mul(a, b, res) __builtin_mul_overflow(a, b, res)
+
++#define grub_cast(a, res) grub_add ((a), 0, (res))
++
+ #else
+ #error gcc 5.1 or newer or clang 3.8 or newer is required
+ #endif
+--
+2.25.1
+
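grub_cast() above is checked narrowing built on grub_add(value, 0, result); its effect can be reproduced directly with the compiler builtin (a standalone sketch, not the GRUB macro itself):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t pixels = 300ULL * 300ULL;   /* width * height */
    uint32_t small;
    uint16_t too_small;

    /* adding 0 with __builtin_add_overflow() reports whether the value
     * fits the destination type -- the trick behind grub_cast() */
    printf("fits u32: %d\n", !__builtin_add_overflow(pixels, 0, &small));
    printf("fits u16: %d\n", !__builtin_add_overflow(pixels, 0, &too_small));
    printf("1bpp buffer bytes: %llu\n",
           (unsigned long long) (pixels / 8 + !!(pixels % 8)));
    return 0;
}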
diff --git a/meta/recipes-bsp/grub/grub2.inc b/meta/recipes-bsp/grub/grub2.inc
index 0d3f6d05da..bea03f4fc1 100644
--- a/meta/recipes-bsp/grub/grub2.inc
+++ b/meta/recipes-bsp/grub/grub2.inc
@@ -95,6 +95,22 @@ SRC_URI = "${GNU_MIRROR}/grub/grub-${PV}.tar.gz \
file://0044-script-execute-Fix-NULL-dereference-in-grub_script_e.patch \
file://0045-commands-ls-Require-device_name-is-not-NULL-before-p.patch \
file://0046-script-execute-Avoid-crash-when-using-outside-a-func.patch \
+ file://CVE-2021-3981.patch \
+ file://CVE-2021-3695.patch \
+ file://CVE-2021-3696.patch \
+ file://CVE-2021-3697.patch \
+ file://CVE-2022-28733.patch \
+ file://CVE-2022-28734.patch \
+ file://CVE-2022-28736.patch \
+ file://CVE-2022-28735.patch \
+ file://font-Fix-size-overflow-in-grub_font_get_glyph_intern.patch \
+ file://CVE-2022-2601.patch \
+ file://CVE-2022-3775.patch \
+ file://CVE-2020-27749.patch \
+ file://CVE-2021-20225.patch \
+ file://CVE-2021-20233.patch \
+ file://CVE-2023-4692.patch \
+ file://CVE-2023-4693.patch \
"
SRC_URI[md5sum] = "5ce674ca6b2612d8939b9e6abed32934"
SRC_URI[sha256sum] = "f10c85ae3e204dbaec39ae22fa3c5e99f0665417e91c2cb49b7e5031658ba6ea"
@@ -114,6 +130,8 @@ GRUBPLATFORM ??= "pc"
inherit autotools gettext texinfo pkgconfig
+CFLAGS_remove = "-O2"
+
EXTRA_OECONF = "--with-platform=${GRUBPLATFORM} \
--disable-grub-mkfont \
--program-prefix="" \
diff --git a/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb b/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
index cac09101c4..fa3b993788 100644
--- a/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
+++ b/meta/recipes-bsp/pm-utils/pm-utils_1.4.1.bb
@@ -19,9 +19,12 @@ PACKAGECONFIG[manpages] = "--enable-doc, --disable-doc, libxslt-native xmlto-nat
RDEPENDS_${PN} = "grep bash"
+EXTRA_OECONF = "--libdir=${nonarch_libdir}"
+
do_configure_prepend () {
( cd ${S}; autoreconf -f -i -s )
}
-FILES_${PN} += "${libdir}/${BPN}/*"
+FILES_${PN} += "${nonarch_libdir}/${BPN}/*"
FILES_${PN}-dbg += "${datadir}/doc/pm-utils/README.debugging"
+FILES_${PN}-dev += "${nonarch_libdir}/pkgconfig/pm-utils.pc"
diff --git a/meta/recipes-connectivity/avahi/avahi.inc b/meta/recipes-connectivity/avahi/avahi.inc
index 25bb41b738..e1dfc7a861 100644
--- a/meta/recipes-connectivity/avahi/avahi.inc
+++ b/meta/recipes-connectivity/avahi/avahi.inc
@@ -22,6 +22,15 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=2d5025d4aa3495befef8f17206a5b0a1 \
SRC_URI = "https://github.com/lathiat/avahi/releases/download/v${PV}/avahi-${PV}.tar.gz \
file://fix-CVE-2017-6519.patch \
file://CVE-2021-3468.patch \
+ file://CVE-2023-1981.patch \
+ file://CVE-2023-38469-1.patch \
+ file://CVE-2023-38469-2.patch \
+ file://CVE-2023-38470-1.patch \
+ file://CVE-2023-38470-2.patch \
+ file://CVE-2023-38471-1.patch \
+ file://CVE-2023-38471-2.patch \
+ file://CVE-2023-38472.patch \
+ file://CVE-2023-38473.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/lathiat/avahi/releases/"
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch
new file mode 100644
index 0000000000..1209864402
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-1981.patch
@@ -0,0 +1,60 @@
+Backport of:
+
+From a2696da2f2c50ac43b6c4903f72290d5c3fa9f6f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Thu, 17 Nov 2022 01:51:53 +0100
+Subject: [PATCH] Emit error if requested service is not found
+
+It currently just crashes instead of replying with an error. Check the return
+value and emit an error instead of passing a NULL pointer to reply.
+
+Fixes #375
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-1981.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/a2696da2f2c50ac43b6c4903f72290d5c3fa9f6f]
+CVE: CVE-2023-1981
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-daemon/dbus-protocol.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/avahi-daemon/dbus-protocol.c
++++ b/avahi-daemon/dbus-protocol.c
+@@ -391,10 +391,14 @@ static DBusHandlerResult msg_server_impl
+ }
+
+ t = avahi_alternative_host_name(n);
+- avahi_dbus_respond_string(c, m, t);
+- avahi_free(t);
+-
+- return DBUS_HANDLER_RESULT_HANDLED;
++ if (t) {
++ avahi_dbus_respond_string(c, m, t);
++ avahi_free(t);
++
++ return DBUS_HANDLER_RESULT_HANDLED;
++ } else {
++ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NOT_FOUND, "Hostname not found");
++ }
+
+ } else if (dbus_message_is_method_call(m, AVAHI_DBUS_INTERFACE_SERVER, "GetAlternativeServiceName")) {
+ char *n, *t;
+@@ -405,10 +409,14 @@ static DBusHandlerResult msg_server_impl
+ }
+
+ t = avahi_alternative_service_name(n);
+- avahi_dbus_respond_string(c, m, t);
+- avahi_free(t);
+-
+- return DBUS_HANDLER_RESULT_HANDLED;
++ if (t) {
++ avahi_dbus_respond_string(c, m, t);
++ avahi_free(t);
++
++ return DBUS_HANDLER_RESULT_HANDLED;
++ } else {
++ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NOT_FOUND, "Service not found");
++ }
+
+ } else if (dbus_message_is_method_call(m, AVAHI_DBUS_INTERFACE_SERVER, "EntryGroupNew")) {
+ Client *client;
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch
new file mode 100644
index 0000000000..12dad9ef6f
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-1.patch
@@ -0,0 +1,48 @@
+From a337a1ba7d15853fb56deef1f464529af6e3a1cf Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Mon, 23 Oct 2023 20:29:31 +0000
+Subject: [PATCH] core: reject overly long TXT resource records
+
+Closes https://github.com/lathiat/avahi/issues/455
+
+CVE-2023-38469
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38469-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/a337a1ba7d15853fb56deef1f464529af6e3a1cf]
+CVE: CVE-2023-38469
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-core/rr.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+Index: avahi-0.7/avahi-core/rr.c
+===================================================================
+--- avahi-0.7.orig/avahi-core/rr.c
++++ avahi-0.7/avahi-core/rr.c
+@@ -32,6 +32,7 @@
+ #include <avahi-common/malloc.h>
+ #include <avahi-common/defs.h>
+
++#include "dns.h"
+ #include "rr.h"
+ #include "log.h"
+ #include "util.h"
+@@ -688,11 +689,17 @@ int avahi_record_is_valid(AvahiRecord *r
+ case AVAHI_DNS_TYPE_TXT: {
+
+ AvahiStringList *strlst;
++ size_t used = 0;
+
+- for (strlst = r->data.txt.string_list; strlst; strlst = strlst->next)
++ for (strlst = r->data.txt.string_list; strlst; strlst = strlst->next) {
+ if (strlst->size > 255 || strlst->size <= 0)
+ return 0;
+
++ used += 1+strlst->size;
++ if (used > AVAHI_DNS_RDATA_MAX)
++ return 0;
++ }
++
+ return 1;
+ }
+ }
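The accumulated-length check can be sketched outside Avahi (illustrative cap and names, not the Avahi API): each TXT string serializes as a length byte plus its bytes, and the record is rejected once the running total exceeds the RDATA limit:

#include <stdio.h>
#include <string.h>

#define RDATA_MAX 0xFFFF   /* illustrative cap, in the spirit of AVAHI_DNS_RDATA_MAX */

/* Reject TXT records whose serialized form would exceed the RDATA limit. */
static int txt_record_is_valid(const char *const *strings, size_t n)
{
    size_t used = 0, i, len;

    for (i = 0; i < n; i++)
    {
        len = strlen(strings[i]);
        if (len == 0 || len > 255)
            return 0;
        used += 1 + len;
        if (used > RDATA_MAX)
            return 0;
    }
    return 1;
}

int main(void)
{
    const char *ok[] = { "foo=bar", "baz=qux" };
    printf("%d\n", txt_record_is_valid(ok, 2));
    return 0;
}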
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch
new file mode 100644
index 0000000000..a62c718ebe
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38469-2.patch
@@ -0,0 +1,65 @@
+From c6cab87df290448a63323c8ca759baa516166237 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Wed, 25 Oct 2023 18:15:42 +0000
+Subject: [PATCH] tests: pass overly long TXT resource records
+
+to make sure they don't crash avahi any more.
+It reproduces https://github.com/lathiat/avahi/issues/455
+
+Canonical notes:
+nickgalanis> removed first hunk since there is no .github dir in this release
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38469-2.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/c6cab87df290448a63323c8ca759baa516166237]
+CVE: CVE-2023-38469
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-client/client-test.c | 14 ++++++++++++++
+ 1 files changed, 14 insertions(+)
+
+Index: avahi-0.7/avahi-client/client-test.c
+===================================================================
+--- avahi-0.7.orig/avahi-client/client-test.c
++++ avahi-0.7/avahi-client/client-test.c
+@@ -22,6 +22,7 @@
+ #endif
+
+ #include <stdio.h>
++#include <string.h>
+ #include <assert.h>
+
+ #include <avahi-client/client.h>
+@@ -33,6 +34,8 @@
+ #include <avahi-common/malloc.h>
+ #include <avahi-common/timeval.h>
+
++#include <avahi-core/dns.h>
++
+ static const AvahiPoll *poll_api = NULL;
+ static AvahiSimplePoll *simple_poll = NULL;
+
+@@ -222,6 +225,9 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ uint32_t cookie;
+ struct timeval tv;
+ AvahiAddress a;
++ uint8_t rdata[AVAHI_DNS_RDATA_MAX+1];
++ AvahiStringList *txt = NULL;
++ int r;
+
+ simple_poll = avahi_simple_poll_new();
+ poll_api = avahi_simple_poll_get(simple_poll);
+@@ -258,6 +264,14 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ printf("%s\n", avahi_strerror(avahi_entry_group_add_service (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "Lathiat's Site", "_http._tcp", NULL, NULL, 80, "foo=bar", NULL)));
+ printf("add_record: %d\n", avahi_entry_group_add_record (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", 0x01, 0x10, 120, "\5booya", 6));
+
++ memset(rdata, 1, sizeof(rdata));
++ r = avahi_string_list_parse(rdata, sizeof(rdata), &txt);
++ assert(r >= 0);
++ assert(avahi_string_list_serialize(txt, NULL, 0) == sizeof(rdata));
++ error = avahi_entry_group_add_service_strlst(group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", "_qotd._tcp", NULL, NULL, 123, txt);
++ assert(error == AVAHI_ERR_INVALID_RECORD);
++ avahi_string_list_free(txt);
++
+ avahi_entry_group_commit (group);
+
+ domain = avahi_domain_browser_new (avahi, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, NULL, AVAHI_DOMAIN_BROWSER_BROWSE, 0, avahi_domain_browser_callback, (char*) "omghai3u");
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch
new file mode 100644
index 0000000000..82fb1ab40b
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-1.patch
@@ -0,0 +1,57 @@
+From 94cb6489114636940ac683515417990b55b5d66c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Petr=20Men=C5=A1=C3=ADk?= <pemensik@redhat.com>
+Date: Tue, 11 Apr 2023 15:29:59 +0200
+Subject: [PATCH] Ensure each label is at least one byte long
+
+The only allowed exception is a single dot, where it should return an empty
+string.
+
+Fixes #454.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38470-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/94cb6489114636940ac683515417990b55b5d66c]
+CVE: CVE-2023-38470
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-common/domain-test.c | 14 ++++++++++++++
+ avahi-common/domain.c | 2 +-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+Index: avahi-0.7/avahi-common/domain-test.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/domain-test.c
++++ avahi-0.7/avahi-common/domain-test.c
+@@ -45,6 +45,20 @@ int main(AVAHI_GCC_UNUSED int argc, AVAH
+ printf("%s\n", s = avahi_normalize_name_strdup("fo\\\\o\\..f oo."));
+ avahi_free(s);
+
++ printf("%s\n", s = avahi_normalize_name_strdup("."));
++ avahi_free(s);
++
++ s = avahi_normalize_name_strdup(",.=.}.=.?-.}.=.?.?.}.}.?.?.?.z.?.?.}.}."
++ "}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.=.=.?.?.}.}.?.?.}.}.}"
++ ".?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.=.=.?.?.}.}.?.?.?.zM.?`"
++ "?.}.}.}.?.?.?.r.=.?.}.=.?.?.}.?.?.?.}.=.?.?.}??.}.}.?.?."
++ "?.z.?.?.}.}.}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM.?`?.}.}.}."
++ "??.?.zM.?`?.}.}.}.?.?.?.r.=.?.}.=.?.?.}.?.?.?.}.=.?.?.}?"
++ "?.}.}.?.?.?.z.?.?.}.}.}.?.?.?.r.=.=.}.=.?.}}.}.?.?.?.zM."
++ "?`?.}.}.}.?.?.?.r.=.=.?.?`.?.?}.}.}.?.?.?.r.=.?.}.=.?.?."
++ "}.?.?.?.}.=.?.?.}");
++ assert(s == NULL);
++
+ printf("%i\n", avahi_domain_equal("\\065aa bbb\\.\\046cc.cc\\\\.dee.fff.", "Aaa BBB\\.\\.cc.cc\\\\.dee.fff"));
+ printf("%i\n", avahi_domain_equal("A", "a"));
+
+Index: avahi-0.7/avahi-common/domain.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/domain.c
++++ avahi-0.7/avahi-common/domain.c
+@@ -201,7 +201,7 @@ char *avahi_normalize_name(const char *s
+ }
+
+ if (!empty) {
+- if (size < 1)
++ if (size < 2)
+ return NULL;
+
+ *(r++) = '.';
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch
new file mode 100644
index 0000000000..403ed6fd6a
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38470-2.patch
@@ -0,0 +1,53 @@
+From 20dec84b2480821704258bc908e7b2bd2e883b24 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Tue, 19 Sep 2023 03:21:25 +0000
+Subject: [PATCH] [common] bail out when escaped labels can't fit into ret
+
+Fixes:
+```
+==93410==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7f9e76f14c16 at pc 0x00000047208d bp 0x7ffee90a6a00 sp 0x7ffee90a61c8
+READ of size 1110 at 0x7f9e76f14c16 thread T0
+ #0 0x47208c in __interceptor_strlen (out/fuzz-domain+0x47208c) (BuildId: 731b20c1eef22c2104e75a6496a399b10cfc7cba)
+ #1 0x534eb0 in avahi_strdup avahi/avahi-common/malloc.c:167:12
+ #2 0x53862c in avahi_normalize_name_strdup avahi/avahi-common/domain.c:226:12
+```
+and
+```
+fuzz-domain: fuzz/fuzz-domain.c:38: int LLVMFuzzerTestOneInput(const uint8_t *, size_t): Assertion `avahi_domain_equal(s, t)' failed.
+==101571== ERROR: libFuzzer: deadly signal
+ #0 0x501175 in __sanitizer_print_stack_trace (/home/vagrant/avahi/out/fuzz-domain+0x501175) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #1 0x45ad2c in fuzzer::PrintStackTrace() (/home/vagrant/avahi/out/fuzz-domain+0x45ad2c) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #2 0x43fc07 in fuzzer::Fuzzer::CrashCallback() (/home/vagrant/avahi/out/fuzz-domain+0x43fc07) (BuildId: 682bf6400aff9d41b64b6e2cc3ef5ad600216ea8)
+ #3 0x7f1581d7ebaf (/lib64/libc.so.6+0x3dbaf) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #4 0x7f1581dcf883 in __pthread_kill_implementation (/lib64/libc.so.6+0x8e883) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #5 0x7f1581d7eafd in gsignal (/lib64/libc.so.6+0x3dafd) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #6 0x7f1581d6787e in abort (/lib64/libc.so.6+0x2687e) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #7 0x7f1581d6779a in __assert_fail_base.cold (/lib64/libc.so.6+0x2679a) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #8 0x7f1581d77186 in __assert_fail (/lib64/libc.so.6+0x36186) (BuildId: c9f62793b9e886eb1b95077d4f26fe2b4aa1ac25)
+ #9 0x5344a4 in LLVMFuzzerTestOneInput /home/vagrant/avahi/fuzz/fuzz-domain.c:38:9
+```
+
+It's a follow-up to 94cb6489114636940ac683515417990b55b5d66c
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38471-2.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/20dec84b2480821704258bc908e7b2bd2e883b24]
+CVE: CVE-2023-38470 #Follow-up patch
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-common/domain.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: avahi-0.7/avahi-common/domain.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/domain.c
++++ avahi-0.7/avahi-common/domain.c
+@@ -210,7 +210,8 @@ char *avahi_normalize_name(const char *s
+ } else
+ empty = 0;
+
+- avahi_escape_label(label, strlen(label), &r, &size);
++ if (!(avahi_escape_label(label, strlen(label), &r, &size)))
++ return NULL;
+ }
+
+ return ret_s;
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch
new file mode 100644
index 0000000000..c8d6a66174
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-1.patch
@@ -0,0 +1,73 @@
+From 894f085f402e023a98cbb6f5a3d117bd88d93b09 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Mon, 23 Oct 2023 13:38:35 +0200
+Subject: [PATCH] core: extract host name using avahi_unescape_label()
+
+Previously we could create invalid escape sequence when we split the
+string on dot. For example, from valid host name "foo\\.bar" we have
+created invalid name "foo\\" and tried to set that as the host name
+which crashed the daemon.
+
+Fixes #453
+
+CVE-2023-38471
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38471-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/894f085f402e023a98cbb6f5a3d117bd88d93b09]
+CVE: CVE-2023-38471
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-core/server.c | 27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+Index: avahi-0.7/avahi-core/server.c
+===================================================================
+--- avahi-0.7.orig/avahi-core/server.c
++++ avahi-0.7/avahi-core/server.c
+@@ -1253,7 +1253,11 @@ static void update_fqdn(AvahiServer *s)
+ }
+
+ int avahi_server_set_host_name(AvahiServer *s, const char *host_name) {
+- char *hn = NULL;
++ char label_escaped[AVAHI_LABEL_MAX*4+1];
++ char label[AVAHI_LABEL_MAX];
++ char *hn = NULL, *h;
++ size_t len;
++
+ assert(s);
+
+ AVAHI_CHECK_VALIDITY(s, !host_name || avahi_is_valid_host_name(host_name), AVAHI_ERR_INVALID_HOST_NAME);
+@@ -1263,17 +1267,28 @@ int avahi_server_set_host_name(AvahiServ
+ else
+ hn = avahi_normalize_name_strdup(host_name);
+
+- hn[strcspn(hn, ".")] = 0;
++ h = hn;
++ if (!avahi_unescape_label((const char **)&hn, label, sizeof(label))) {
++ avahi_free(h);
++ return AVAHI_ERR_INVALID_HOST_NAME;
++ }
++
++ avahi_free(h);
+
+- if (avahi_domain_equal(s->host_name, hn) && s->state != AVAHI_SERVER_COLLISION) {
+- avahi_free(hn);
++ h = label_escaped;
++ len = sizeof(label_escaped);
++ if (!avahi_escape_label(label, strlen(label), &h, &len))
++ return AVAHI_ERR_INVALID_HOST_NAME;
++
++ if (avahi_domain_equal(s->host_name, label_escaped) && s->state != AVAHI_SERVER_COLLISION)
+ return avahi_server_set_errno(s, AVAHI_ERR_NO_CHANGE);
+- }
+
+ withdraw_host_rrs(s);
+
+ avahi_free(s->host_name);
+- s->host_name = hn;
++ s->host_name = avahi_strdup(label_escaped);
++ if (!s->host_name)
++ return AVAHI_ERR_NO_MEMORY;
+
+ update_fqdn(s);
+
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch
new file mode 100644
index 0000000000..a789b144ed
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38471-2.patch
@@ -0,0 +1,52 @@
+From b675f70739f404342f7f78635d6e2dcd85a13460 Mon Sep 17 00:00:00 2001
+From: Evgeny Vereshchagin <evvers@ya.ru>
+Date: Tue, 24 Oct 2023 22:04:51 +0000
+Subject: [PATCH] core: return errors from avahi_server_set_host_name properly
+
+It's a follow-up to 894f085f402e023a98cbb6f5a3d117bd88d93b09
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38471-2.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/b675f70739f404342f7f78635d6e2dcd85a13460]
+CVE: CVE-2023-38471 #Follow-up Patch
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-core/server.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+Index: avahi-0.7/avahi-core/server.c
+===================================================================
+--- avahi-0.7.orig/avahi-core/server.c
++++ avahi-0.7/avahi-core/server.c
+@@ -1267,10 +1267,13 @@ int avahi_server_set_host_name(AvahiServ
+ else
+ hn = avahi_normalize_name_strdup(host_name);
+
++ if (!hn)
++ return avahi_server_set_errno(s, AVAHI_ERR_NO_MEMORY);
++
+ h = hn;
+ if (!avahi_unescape_label((const char **)&hn, label, sizeof(label))) {
+ avahi_free(h);
+- return AVAHI_ERR_INVALID_HOST_NAME;
++ return avahi_server_set_errno(s, AVAHI_ERR_INVALID_HOST_NAME);
+ }
+
+ avahi_free(h);
+@@ -1278,7 +1281,7 @@ int avahi_server_set_host_name(AvahiServ
+ h = label_escaped;
+ len = sizeof(label_escaped);
+ if (!avahi_escape_label(label, strlen(label), &h, &len))
+- return AVAHI_ERR_INVALID_HOST_NAME;
++ return avahi_server_set_errno(s, AVAHI_ERR_INVALID_HOST_NAME);
+
+ if (avahi_domain_equal(s->host_name, label_escaped) && s->state != AVAHI_SERVER_COLLISION)
+ return avahi_server_set_errno(s, AVAHI_ERR_NO_CHANGE);
+@@ -1288,7 +1291,7 @@ int avahi_server_set_host_name(AvahiServ
+ avahi_free(s->host_name);
+ s->host_name = avahi_strdup(label_escaped);
+ if (!s->host_name)
+- return AVAHI_ERR_NO_MEMORY;
++ return avahi_server_set_errno(s, AVAHI_ERR_NO_MEMORY);
+
+ update_fqdn(s);
+
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch
new file mode 100644
index 0000000000..f49d990a42
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38472.patch
@@ -0,0 +1,45 @@
+From b024ae5749f4aeba03478e6391687c3c9c8dee40 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Thu, 19 Oct 2023 17:36:44 +0200
+Subject: [PATCH] core: make sure there is rdata to process before parsing it
+
+Fixes #452
+
+CVE-2023-38472
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38472.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/b024ae5749f4aeba03478e6391687c3c9c8dee40]
+CVE: CVE-2023-38472
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-client/client-test.c | 3 +++
+ avahi-daemon/dbus-entry-group.c | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+Index: avahi-0.7/avahi-client/client-test.c
+===================================================================
+--- avahi-0.7.orig/avahi-client/client-test.c
++++ avahi-0.7/avahi-client/client-test.c
+@@ -272,6 +272,9 @@ int main (AVAHI_GCC_UNUSED int argc, AVA
+ assert(error == AVAHI_ERR_INVALID_RECORD);
+ avahi_string_list_free(txt);
+
++ error = avahi_entry_group_add_record (group, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, 0, "TestX", 0x01, 0x10, 120, "", 0);
++ assert(error != AVAHI_OK);
++
+ avahi_entry_group_commit (group);
+
+ domain = avahi_domain_browser_new (avahi, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, NULL, AVAHI_DOMAIN_BROWSER_BROWSE, 0, avahi_domain_browser_callback, (char*) "omghai3u");
+Index: avahi-0.7/avahi-daemon/dbus-entry-group.c
+===================================================================
+--- avahi-0.7.orig/avahi-daemon/dbus-entry-group.c
++++ avahi-0.7/avahi-daemon/dbus-entry-group.c
+@@ -340,7 +340,7 @@ DBusHandlerResult avahi_dbus_msg_entry_g
+ if (!(r = avahi_record_new_full (name, clazz, type, ttl)))
+ return avahi_dbus_respond_error(c, m, AVAHI_ERR_NO_MEMORY, NULL);
+
+- if (avahi_rdata_parse (r, rdata, size) < 0) {
++ if (!rdata || avahi_rdata_parse (r, rdata, size) < 0) {
+ avahi_record_unref (r);
+ return avahi_dbus_respond_error(c, m, AVAHI_ERR_INVALID_RDATA, NULL);
+ }
diff --git a/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch b/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch
new file mode 100644
index 0000000000..59f6806c85
--- /dev/null
+++ b/meta/recipes-connectivity/avahi/files/CVE-2023-38473.patch
@@ -0,0 +1,109 @@
+From b448c9f771bada14ae8de175695a9729f8646797 Mon Sep 17 00:00:00 2001
+From: Michal Sekletar <msekleta@redhat.com>
+Date: Wed, 11 Oct 2023 17:45:44 +0200
+Subject: [PATCH] common: derive alternative host name from its unescaped
+ version
+
+Normalization of input makes sure we don't have to deal with special
+cases like unescaped dot at the end of label.
+
+Fixes #451 #487
+CVE-2023-38473
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/avahi/tree/debian/patches/CVE-2023-38473.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/lathiat/avahi/commit/b448c9f771bada14ae8de175695a9729f8646797]
+CVE: CVE-2023-38473
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ avahi-common/alternative-test.c | 3 +++
+ avahi-common/alternative.c | 27 +++++++++++++++++++--------
+ 2 files changed, 22 insertions(+), 8 deletions(-)
+
+Index: avahi-0.7/avahi-common/alternative-test.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/alternative-test.c
++++ avahi-0.7/avahi-common/alternative-test.c
+@@ -31,6 +31,9 @@ int main(AVAHI_GCC_UNUSED int argc, AVAH
+ const char* const test_strings[] = {
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXüüüüüüü",
++ ").",
++ "\\.",
++ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\\\\",
+ "gurke",
+ "-",
+ " #",
+Index: avahi-0.7/avahi-common/alternative.c
+===================================================================
+--- avahi-0.7.orig/avahi-common/alternative.c
++++ avahi-0.7/avahi-common/alternative.c
+@@ -49,15 +49,20 @@ static void drop_incomplete_utf8(char *c
+ }
+
+ char *avahi_alternative_host_name(const char *s) {
++ char label[AVAHI_LABEL_MAX], alternative[AVAHI_LABEL_MAX*4+1];
++ char *alt, *r, *ret;
+ const char *e;
+- char *r;
++ size_t len;
+
+ assert(s);
+
+ if (!avahi_is_valid_host_name(s))
+ return NULL;
+
+- if ((e = strrchr(s, '-'))) {
++ if (!avahi_unescape_label(&s, label, sizeof(label)))
++ return NULL;
++
++ if ((e = strrchr(label, '-'))) {
+ const char *p;
+
+ e++;
+@@ -74,19 +79,18 @@ char *avahi_alternative_host_name(const
+
+ if (e) {
+ char *c, *m;
+- size_t l;
+ int n;
+
+ n = atoi(e)+1;
+ if (!(m = avahi_strdup_printf("%i", n)))
+ return NULL;
+
+- l = e-s-1;
++ len = e-label-1;
+
+- if (l >= AVAHI_LABEL_MAX-1-strlen(m)-1)
+- l = AVAHI_LABEL_MAX-1-strlen(m)-1;
++ if (len >= AVAHI_LABEL_MAX-1-strlen(m)-1)
++ len = AVAHI_LABEL_MAX-1-strlen(m)-1;
+
+- if (!(c = avahi_strndup(s, l))) {
++ if (!(c = avahi_strndup(label, len))) {
+ avahi_free(m);
+ return NULL;
+ }
+@@ -100,7 +104,7 @@ char *avahi_alternative_host_name(const
+ } else {
+ char *c;
+
+- if (!(c = avahi_strndup(s, AVAHI_LABEL_MAX-1-2)))
++ if (!(c = avahi_strndup(label, AVAHI_LABEL_MAX-1-2)))
+ return NULL;
+
+ drop_incomplete_utf8(c);
+@@ -109,6 +113,13 @@ char *avahi_alternative_host_name(const
+ avahi_free(c);
+ }
+
++ alt = alternative;
++ len = sizeof(alternative);
++ ret = avahi_escape_label(r, strlen(r), &alt, &len);
++
++ avahi_free(r);
++ r = avahi_strdup(ret);
++
+ assert(avahi_is_valid_host_name(r));
+
+ return r;
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2022-2795.patch b/meta/recipes-connectivity/bind/bind/CVE-2022-2795.patch
new file mode 100644
index 0000000000..940c6776d3
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2022-2795.patch
@@ -0,0 +1,67 @@
+From 36c878a0124973f29b7ca49e6bb18310f9b2601f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Micha=C5=82=20K=C4=99pie=C5=84?= <michal@isc.org>
+Date: Thu, 8 Sep 2022 11:11:30 +0200
+Subject: [PATCH 1/3] Bound the amount of work performed for delegations
+
+Limit the amount of database lookups that can be triggered in
+fctx_getaddresses() (i.e. when determining the name server addresses to
+query next) by setting a hard limit on the number of NS RRs processed
+for any delegation encountered. Without any limit in place, named can
+be forced to perform large amounts of database lookups per each query
+received, which severely impacts resolver performance.
+
+The limit used (20) is an arbitrary value that is considered to be big
+enough for any sane DNS delegation.
+
+(cherry picked from commit 3a44097fd6c6c260765b628cd1d2c9cb7efb0b2a)
+
+Upstream-Status: Backport
+CVE: CVE-2022-2795
+Reference to upstream patch:
+https://gitlab.isc.org/isc-projects/bind9/-/commit/bf2ea6d8525bfd96a84dad221ba9e004adb710a8
+
+Signed-off-by: Mathieu Dubois-Briand <mbriand@witekio.com>
+---
+ lib/dns/resolver.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c
+index 8ae9a993bbd7..ac9a9ef5d009 100644
+--- a/lib/dns/resolver.c
++++ b/lib/dns/resolver.c
+@@ -180,6 +180,12 @@
+ */
+ #define NS_FAIL_LIMIT 4
+ #define NS_RR_LIMIT 5
++/*
++ * IP address lookups are performed for at most NS_PROCESSING_LIMIT NS RRs in
++ * any NS RRset encountered, to avoid excessive resource use while processing
++ * large delegations.
++ */
++#define NS_PROCESSING_LIMIT 20
+
+ /* Number of hash buckets for zone counters */
+ #ifndef RES_DOMAIN_BUCKETS
+@@ -3318,6 +3324,7 @@ fctx_getaddresses(fetchctx_t *fctx, bool badcache) {
+ bool need_alternate = false;
+ bool all_spilled = true;
+ unsigned int no_addresses = 0;
++ unsigned int ns_processed = 0;
+
+ FCTXTRACE5("getaddresses", "fctx->depth=", fctx->depth);
+
+@@ -3504,6 +3511,11 @@ fctx_getaddresses(fetchctx_t *fctx, bool badcache) {
+
+ dns_rdata_reset(&rdata);
+ dns_rdata_freestruct(&ns);
++
++ if (++ns_processed >= NS_PROCESSING_LIMIT) {
++ result = ISC_R_NOMORE;
++ break;
++ }
+ }
+ if (result != ISC_R_NOMORE) {
+ return (result);
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2022-38177.patch b/meta/recipes-connectivity/bind/bind/CVE-2022-38177.patch
new file mode 100644
index 0000000000..0ef87fd260
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2022-38177.patch
@@ -0,0 +1,31 @@
+From ef3d1a84ff807eea27b4fef601a15932c5ffbfbf Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Thu, 11 Aug 2022 15:15:34 +1000
+Subject: [PATCH 2/3] Free eckey on siglen mismatch
+
+Upstream-Status: Backport
+CVE: CVE-2022-38177
+Reference to upstream patch:
+https://gitlab.isc.org/isc-projects/bind9/-/commit/5b2282afff760b1ed3471f6666bdfe8e1d34e590
+
+Signed-off-by: Mathieu Dubois-Briand <mbriand@witekio.com>
+---
+ lib/dns/opensslecdsa_link.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/dns/opensslecdsa_link.c b/lib/dns/opensslecdsa_link.c
+index 83b5b51cd78c..7576e04ac635 100644
+--- a/lib/dns/opensslecdsa_link.c
++++ b/lib/dns/opensslecdsa_link.c
+@@ -224,7 +224,7 @@ opensslecdsa_verify(dst_context_t *dctx, const isc_region_t *sig) {
+ siglen = DNS_SIG_ECDSA384SIZE;
+
+ if (sig->length != siglen)
+- return (DST_R_VERIFYFAILURE);
++ DST_RET(DST_R_VERIFYFAILURE);
+
+ if (!EVP_DigestFinal_ex(evp_md_ctx, digest, &dgstlen))
+ DST_RET (dst__openssl_toresult3(dctx->category,
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2022-38178.patch b/meta/recipes-connectivity/bind/bind/CVE-2022-38178.patch
new file mode 100644
index 0000000000..e0b398e24a
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2022-38178.patch
@@ -0,0 +1,33 @@
+From 65f5b2f0162d5d2ab25f463aa14a8bae71ace3d9 Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Thu, 11 Aug 2022 15:28:13 +1000
+Subject: [PATCH 3/3] Free ctx on invalid siglen
+
+(cherry picked from commit 6ddb480a84836641a0711768a94122972c166825)
+
+Upstream-Status: Backport
+CVE: CVE-2022-38178
+Reference to upstream patch:
+https://gitlab.isc.org/isc-projects/bind9/-/commit/1af23378ebb11da2eb0f412e4563d6
+
+Signed-off-by: Mathieu Dubois-Briand <mbriand@witekio.com>
+---
+ lib/dns/openssleddsa_link.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/dns/openssleddsa_link.c b/lib/dns/openssleddsa_link.c
+index 8b115ec283f0..b4fcd607c131 100644
+--- a/lib/dns/openssleddsa_link.c
++++ b/lib/dns/openssleddsa_link.c
+@@ -325,7 +325,7 @@ openssleddsa_verify(dst_context_t *dctx, const isc_region_t *sig) {
+ siglen = DNS_SIG_ED448SIZE;
+
+ if (sig->length != siglen)
+- return (DST_R_VERIFYFAILURE);
++ DST_RET(ISC_R_NOTIMPLEMENTED);
+
+ isc_buffer_usedregion(buf, &tbsreg);
+
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2023-2828.patch b/meta/recipes-connectivity/bind/bind/CVE-2023-2828.patch
new file mode 100644
index 0000000000..6f6c104530
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2023-2828.patch
@@ -0,0 +1,166 @@
+
+Upstream-Status: Backport [import from debian security.debian.org/debian-security/pool/updates/main/b/bind9/bind9_9.11.5.P4+dfsg-5.1+deb10u9.debian.tar.xz
+Upstream patch https://downloads.isc.org/isc/bind9/9.16.42/patches/0001-CVE-2023-2828.patch]
+Upstream Commit: https://github.com/isc-projects/bind9/commit/da0eafcdee52147e72d407cc3b9f179378ee1d3a
+CVE: CVE-2023-2828
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+
+---
+ lib/dns/rbtdb.c | 106 +++++++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 63 insertions(+), 43 deletions(-)
+
+diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c
+index b1b928c..3165e26 100644
+--- a/lib/dns/rbtdb.c
++++ b/lib/dns/rbtdb.c
+@@ -792,7 +792,7 @@ static void update_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
+ static void expire_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
+ bool tree_locked, expire_t reason);
+ static void overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start,
+- isc_stdtime_t now, bool tree_locked);
++ size_t purgesize, bool tree_locked);
+ static isc_result_t resign_insert(dns_rbtdb_t *rbtdb, int idx,
+ rdatasetheader_t *newheader);
+ static void resign_delete(dns_rbtdb_t *rbtdb, rbtdb_version_t *version,
+@@ -6784,6 +6784,16 @@ addclosest(dns_rbtdb_t *rbtdb, rdatasetheader_t *newheader,
+
+ static dns_dbmethods_t zone_methods;
+
++static size_t
++rdataset_size(rdatasetheader_t *header) {
++ if (!NONEXISTENT(header)) {
++ return (dns_rdataslab_size((unsigned char *)header,
++ sizeof(*header)));
++ }
++
++ return (sizeof(*header));
++}
++
+ static isc_result_t
+ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
+ isc_stdtime_t now, dns_rdataset_t *rdataset, unsigned int options,
+@@ -6932,7 +6942,8 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
+ }
+
+ if (cache_is_overmem)
+- overmem_purge(rbtdb, rbtnode->locknum, now, tree_locked);
++ overmem_purge(rbtdb, rbtnode->locknum, rdataset_size(newheader),
++ tree_locked);
+
+ NODE_LOCK(&rbtdb->node_locks[rbtnode->locknum].lock,
+ isc_rwlocktype_write);
+@@ -6947,9 +6958,14 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
+ cleanup_dead_nodes(rbtdb, rbtnode->locknum);
+
+ header = isc_heap_element(rbtdb->heaps[rbtnode->locknum], 1);
+- if (header && header->rdh_ttl < now - RBTDB_VIRTUAL)
+- expire_header(rbtdb, header, tree_locked,
+- expire_ttl);
++ if (header != NULL) {
++ dns_ttl_t rdh_ttl = header->rdh_ttl;
++
++ if (rdh_ttl < now - RBTDB_VIRTUAL) {
++ expire_header(rbtdb, header, tree_locked,
++ expire_ttl);
++ }
++ }
+
+ /*
+ * If we've been holding a write lock on the tree just for
+@@ -10388,54 +10404,58 @@ update_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
+ ISC_LIST_PREPEND(rbtdb->rdatasets[header->node->locknum], header, link);
+ }
+
++static size_t
++expire_lru_headers(dns_rbtdb_t *rbtdb, unsigned int locknum, size_t purgesize,
++ bool tree_locked) {
++ rdatasetheader_t *header, *header_prev;
++ size_t purged = 0;
++
++ for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
++ header != NULL && purged <= purgesize; header = header_prev)
++ {
++ header_prev = ISC_LIST_PREV(header, link);
++ /*
++ * Unlink the entry at this point to avoid checking it
++ * again even if it's currently used someone else and
++ * cannot be purged at this moment. This entry won't be
++ * referenced any more (so unlinking is safe) since the
++ * TTL was reset to 0.
++ */
++ ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header, link);
++ size_t header_size = rdataset_size(header);
++ expire_header(rbtdb, header, tree_locked, expire_lru);
++ purged += header_size;
++ }
++
++ return (purged);
++}
++
+ /*%
+- * Purge some expired and/or stale (i.e. unused for some period) cache entries
+- * under an overmem condition. To recover from this condition quickly, up to
+- * 2 entries will be purged. This process is triggered while adding a new
+- * entry, and we specifically avoid purging entries in the same LRU bucket as
+- * the one to which the new entry will belong. Otherwise, we might purge
+- * entries of the same name of different RR types while adding RRsets from a
+- * single response (consider the case where we're adding A and AAAA glue records
+- * of the same NS name).
+- */
++ * Purge some stale (i.e. unused for some period - LRU based cleaning) cache
++ * entries under the overmem condition. To recover from this condition quickly,
++ * we cleanup entries up to the size of newly added rdata (passed as purgesize).
++ *
++ * This process is triggered while adding a new entry, and we specifically avoid
++ * purging entries in the same LRU bucket as the one to which the new entry will
++ * belong. Otherwise, we might purge entries of the same name of different RR
++ * types while adding RRsets from a single response (consider the case where
++ * we're adding A and AAAA glue records of the same NS name).
++*/
+ static void
+-overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start,
+- isc_stdtime_t now, bool tree_locked)
++overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, size_t purgesize,
++ bool tree_locked)
+ {
+- rdatasetheader_t *header, *header_prev;
+ unsigned int locknum;
+- int purgecount = 2;
++ size_t purged = 0;
+
+ for (locknum = (locknum_start + 1) % rbtdb->node_lock_count;
+- locknum != locknum_start && purgecount > 0;
++ locknum != locknum_start && purged <= purgesize;
+ locknum = (locknum + 1) % rbtdb->node_lock_count) {
+ NODE_LOCK(&rbtdb->node_locks[locknum].lock,
+ isc_rwlocktype_write);
+
+- header = isc_heap_element(rbtdb->heaps[locknum], 1);
+- if (header && header->rdh_ttl < now - RBTDB_VIRTUAL) {
+- expire_header(rbtdb, header, tree_locked,
+- expire_ttl);
+- purgecount--;
+- }
+-
+- for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
+- header != NULL && purgecount > 0;
+- header = header_prev) {
+- header_prev = ISC_LIST_PREV(header, link);
+- /*
+- * Unlink the entry at this point to avoid checking it
+- * again even if it's currently used someone else and
+- * cannot be purged at this moment. This entry won't be
+- * referenced any more (so unlinking is safe) since the
+- * TTL was reset to 0.
+- */
+- ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header,
+- link);
+- expire_header(rbtdb, header, tree_locked,
+- expire_lru);
+- purgecount--;
+- }
++ purged += expire_lru_headers(rbtdb, locknum, purgesize - purged,
++ tree_locked);
+
+ NODE_UNLOCK(&rbtdb->node_locks[locknum].lock,
+ isc_rwlocktype_write);
diff --git a/meta/recipes-connectivity/bind/bind/CVE-2023-3341.patch b/meta/recipes-connectivity/bind/bind/CVE-2023-3341.patch
new file mode 100644
index 0000000000..be479cb00e
--- /dev/null
+++ b/meta/recipes-connectivity/bind/bind/CVE-2023-3341.patch
@@ -0,0 +1,175 @@
+From c4fac5ca98efd02fbaef43601627c7a3a09f5a71 Mon Sep 17 00:00:00 2001
+From: Mark Andrews <marka@isc.org>
+Date: Tue, 20 Jun 2023 15:21:36 +1000
+Subject: [PATCH] Limit isccc_cc_fromwire recursion depth
+
+Named and rndc do not need a lot of recursion so the depth is
+set to 10.
+
+Taken from BIND 9.16.44 change.
+
+Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/-/commit/c4fac5ca98efd02fbaef43601627c7a3a09f5a71]
+CVE: CVE-2023-3341
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/isccc/cc.c | 38 +++++++++++++++++++++++---------
+ lib/isccc/include/isccc/result.h | 4 +++-
+ lib/isccc/result.c | 4 +++-
+ 3 files changed, 34 insertions(+), 12 deletions(-)
+
+diff --git a/lib/isccc/cc.c b/lib/isccc/cc.c
+index e012685..8eac3d6 100644
+--- a/lib/isccc/cc.c
++++ b/lib/isccc/cc.c
+@@ -53,6 +53,10 @@
+
+ #define MAX_TAGS 256
+ #define DUP_LIFETIME 900
++#ifndef ISCCC_MAXDEPTH
++#define ISCCC_MAXDEPTH \
++ 10 /* Big enough for rndc which just sends a string each way. */
++#endif
+
+ typedef isccc_sexpr_t *sexpr_ptr;
+
+@@ -561,19 +565,25 @@ verify(isccc_sexpr_t *alist, unsigned char *data, unsigned int length,
+
+ static isc_result_t
+ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+- uint32_t algorithm, isccc_sexpr_t **alistp);
++ uint32_t algorithm, unsigned int depth, isccc_sexpr_t **alistp);
+
+ static isc_result_t
+-list_fromwire(isccc_region_t *source, isccc_sexpr_t **listp);
++list_fromwire(isccc_region_t *source, unsigned int depth,
++ isccc_sexpr_t **listp);
+
+ static isc_result_t
+-value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
++value_fromwire(isccc_region_t *source, unsigned int depth,
++ isccc_sexpr_t **valuep) {
+ unsigned int msgtype;
+ uint32_t len;
+ isccc_sexpr_t *value;
+ isccc_region_t active;
+ isc_result_t result;
+
++ if (depth > ISCCC_MAXDEPTH) {
++ return (ISCCC_R_MAXDEPTH);
++ }
++
+ if (REGION_SIZE(*source) < 1 + 4)
+ return (ISC_R_UNEXPECTEDEND);
+ GET8(msgtype, source->rstart);
+@@ -591,9 +601,9 @@ value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
+ } else
+ result = ISC_R_NOMEMORY;
+ } else if (msgtype == ISCCC_CCMSGTYPE_TABLE)
+- result = table_fromwire(&active, NULL, 0, valuep);
++ result = table_fromwire(&active, NULL, 0, depth + 1, valuep);
+ else if (msgtype == ISCCC_CCMSGTYPE_LIST)
+- result = list_fromwire(&active, valuep);
++ result = list_fromwire(&active, depth + 1, valuep);
+ else
+ result = ISCCC_R_SYNTAX;
+
+@@ -602,7 +612,7 @@ value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
+
+ static isc_result_t
+ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+- uint32_t algorithm, isccc_sexpr_t **alistp)
++ uint32_t algorithm, unsigned int depth, isccc_sexpr_t **alistp)
+ {
+ char key[256];
+ uint32_t len;
+@@ -613,6 +623,10 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+
+ REQUIRE(alistp != NULL && *alistp == NULL);
+
++ if (depth > ISCCC_MAXDEPTH) {
++ return (ISCCC_R_MAXDEPTH);
++ }
++
+ checksum_rstart = NULL;
+ first_tag = true;
+ alist = isccc_alist_create();
+@@ -628,7 +642,7 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+ GET_MEM(key, len, source->rstart);
+ key[len] = '\0'; /* Ensure NUL termination. */
+ value = NULL;
+- result = value_fromwire(source, &value);
++ result = value_fromwire(source, depth + 1, &value);
+ if (result != ISC_R_SUCCESS)
+ goto bad;
+ if (isccc_alist_define(alist, key, value) == NULL) {
+@@ -661,14 +675,18 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
+ }
+
+ static isc_result_t
+-list_fromwire(isccc_region_t *source, isccc_sexpr_t **listp) {
++list_fromwire(isccc_region_t *source, unsigned int depth, isccc_sexpr_t **listp) {
+ isccc_sexpr_t *list, *value;
+ isc_result_t result;
+
++ if (depth > ISCCC_MAXDEPTH) {
++ return (ISCCC_R_MAXDEPTH);
++ }
++
+ list = NULL;
+ while (!REGION_EMPTY(*source)) {
+ value = NULL;
+- result = value_fromwire(source, &value);
++ result = value_fromwire(source, depth + 1, &value);
+ if (result != ISC_R_SUCCESS) {
+ isccc_sexpr_free(&list);
+ return (result);
+@@ -699,7 +717,7 @@ isccc_cc_fromwire(isccc_region_t *source, isccc_sexpr_t **alistp,
+ if (version != 1)
+ return (ISCCC_R_UNKNOWNVERSION);
+
+- return (table_fromwire(source, secret, algorithm, alistp));
++ return (table_fromwire(source, secret, algorithm, 0, alistp));
+ }
+
+ static isc_result_t
+diff --git a/lib/isccc/include/isccc/result.h b/lib/isccc/include/isccc/result.h
+index 6c79dd7..a85861c 100644
+--- a/lib/isccc/include/isccc/result.h
++++ b/lib/isccc/include/isccc/result.h
+@@ -47,8 +47,10 @@
+ #define ISCCC_R_CLOCKSKEW (ISC_RESULTCLASS_ISCCC + 4)
+ /*% Duplicate */
+ #define ISCCC_R_DUPLICATE (ISC_RESULTCLASS_ISCCC + 5)
++/*% Maximum recursion depth */
++#define ISCCC_R_MAXDEPTH (ISC_RESULTCLASS_ISCCC + 6)
+
+-#define ISCCC_R_NRESULTS 6 /*%< Number of results */
++#define ISCCC_R_NRESULTS 7 /*%< Number of results */
+
+ ISC_LANG_BEGINDECLS
+
+diff --git a/lib/isccc/result.c b/lib/isccc/result.c
+index 8419bbb..325200b 100644
+--- a/lib/isccc/result.c
++++ b/lib/isccc/result.c
+@@ -40,7 +40,8 @@ static const char *text[ISCCC_R_NRESULTS] = {
+ "bad auth", /* 3 */
+ "expired", /* 4 */
+ "clock skew", /* 5 */
+- "duplicate" /* 6 */
++ "duplicate", /* 6 */
++ "max depth", /* 7 */
+ };
+
+ static const char *ids[ISCCC_R_NRESULTS] = {
+@@ -50,6 +51,7 @@ static const char *ids[ISCCC_R_NRESULTS] = {
+ "ISCCC_R_EXPIRED",
+ "ISCCC_R_CLOCKSKEW",
+ "ISCCC_R_DUPLICATE",
++ "ISCCC_R_MAXDEPTH",
+ };
+
+ #define ISCCC_RESULT_RESULTSET 2
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/bind/bind_9.11.37.bb b/meta/recipes-connectivity/bind/bind_9.11.37.bb
index afc8cf0b3b..95bb5be005 100644
--- a/meta/recipes-connectivity/bind/bind_9.11.37.bb
+++ b/meta/recipes-connectivity/bind/bind_9.11.37.bb
@@ -19,6 +19,11 @@ SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \
file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \
file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \
file://0001-avoid-start-failure-with-bind-user.patch \
+ file://CVE-2022-2795.patch \
+ file://CVE-2022-38177.patch \
+ file://CVE-2022-38178.patch \
+ file://CVE-2023-2828.patch \
+ file://CVE-2023-3341.patch \
"
SRC_URI[sha256sum] = "0d8efbe7ec166ada90e46add4267b7e7c934790cba9bd5af6b8380a4fbfb5aff"
diff --git a/meta/recipes-connectivity/bluez5/bluez5.inc b/meta/recipes-connectivity/bluez5/bluez5.inc
index 4d4348898a..74fd344170 100644
--- a/meta/recipes-connectivity/bluez5/bluez5.inc
+++ b/meta/recipes-connectivity/bluez5/bluez5.inc
@@ -7,6 +7,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=12f884d2ae1ff87c09e5b7ccc2c4ca7e \
file://COPYING.LIB;md5=fb504b67c50331fc78734fed90fb0e09 \
file://src/main.c;beginline=1;endline=24;md5=9bc54b93cd7e17bf03f52513f39f926e"
DEPENDS = "dbus glib-2.0"
+RDEPENDS_${PN} += "dbus"
PROVIDES += "bluez-hcidump"
RPROVIDES_${PN} += "bluez-hcidump"
@@ -56,6 +57,9 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/bluetooth/bluez-${PV}.tar.xz \
file://CVE-2021-3588.patch \
file://CVE-2021-3658.patch \
file://CVE-2022-0204.patch \
+ file://CVE-2022-39176.patch \
+ file://CVE-2022-3637.patch \
+ file://CVE-2023-45866.patch \
"
S = "${WORKDIR}/bluez-${PV}"
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-3637.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-3637.patch
new file mode 100644
index 0000000000..4ca60f99d5
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-3637.patch
@@ -0,0 +1,39 @@
+From b808b2852a0b48c6f9dbb038f932613cea3126c2 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 27 Oct 2022 09:51:27 +0530
+Subject: [PATCH] CVE-2022-3637
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/bluetooth/bluez.git/commit/monitor/jlink.c?id=1d6cfb8e625a944010956714c1802bc1e1fc6c4f]
+CVE: CVE-2022-3637
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+monitor: Fix crash when using RTT backend
+
+This fix regression introduced by "monitor: Fix memory leaks".
+J-Link shared library is in use if jlink_init() returns 0 and thus
+handle shall not be closed.
+---
+ monitor/jlink.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/monitor/jlink.c b/monitor/jlink.c
+index afa9d93..5bd4aed 100644
+--- a/monitor/jlink.c
++++ b/monitor/jlink.c
+@@ -120,9 +120,12 @@ int jlink_init(void)
+ !jlink.tif_select || !jlink.setspeed ||
+ !jlink.connect || !jlink.getsn ||
+ !jlink.emu_getproductname ||
+- !jlink.rtterminal_control || !jlink.rtterminal_read)
++ !jlink.rtterminal_control || !jlink.rtterminal_read) {
++ dlclose(so);
+ return -EIO;
++ }
+
++ /* don't dlclose(so) here cause symbols from it are in use now */
+ return 0;
+ }
+
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-39176.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-39176.patch
new file mode 100644
index 0000000000..7bd1f5f80f
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2022-39176.patch
@@ -0,0 +1,126 @@
+From 752c7f707c3cc1eb12eadc13bc336a5c484d4bdf Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 28 Sep 2022 10:45:53 +0530
+Subject: [PATCH] CVE-2022-39176
+
+Upstream-Status: Backport [https://launchpad.net/ubuntu/+source/bluez/5.53-0ubuntu3.6]
+CVE: CVE-2022-39176
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ profiles/audio/avdtp.c | 56 +++++++++++++++++++++++++++---------------
+ profiles/audio/avrcp.c | 8 ++++++
+ 2 files changed, 44 insertions(+), 20 deletions(-)
+
+diff --git a/profiles/audio/avdtp.c b/profiles/audio/avdtp.c
+index 782268c..0adf413 100644
+--- a/profiles/audio/avdtp.c
++++ b/profiles/audio/avdtp.c
+@@ -1261,43 +1261,53 @@ struct avdtp_remote_sep *avdtp_find_remote_sep(struct avdtp *session,
+ return NULL;
+ }
+
+-static GSList *caps_to_list(uint8_t *data, int size,
++static GSList *caps_to_list(uint8_t *data, size_t size,
+ struct avdtp_service_capability **codec,
+ gboolean *delay_reporting)
+ {
++ struct avdtp_service_capability *cap;
+ GSList *caps;
+- int processed;
+
+ if (delay_reporting)
+ *delay_reporting = FALSE;
+
+- for (processed = 0, caps = NULL; processed + 2 <= size;) {
+- struct avdtp_service_capability *cap;
+- uint8_t length, category;
++ if (size < sizeof(*cap))
++ return NULL;
++
++ for (caps = NULL; size >= sizeof(*cap);) {
++ struct avdtp_service_capability *cpy;
+
+- category = data[0];
+- length = data[1];
++ cap = (struct avdtp_service_capability *)data;
+
+- if (processed + 2 + length > size) {
++ if (sizeof(*cap) + cap->length > size) {
+ error("Invalid capability data in getcap resp");
+ break;
+ }
+
+- cap = g_malloc(sizeof(struct avdtp_service_capability) +
+- length);
+- memcpy(cap, data, 2 + length);
++ if (cap->category == AVDTP_MEDIA_CODEC &&
++ cap->length < sizeof(**codec)) {
++ error("Invalid codec data in getcap resp");
++ break;
++ }
++
++ cpy = btd_malloc(sizeof(*cpy) + cap->length);
++ memcpy(cpy, cap, sizeof(*cap) + cap->length);
+
+- processed += 2 + length;
+- data += 2 + length;
++ size -= sizeof(*cap) + cap->length;
++ data += sizeof(*cap) + cap->length;
+
+- caps = g_slist_append(caps, cap);
++ caps = g_slist_append(caps, cpy);
+
+- if (category == AVDTP_MEDIA_CODEC &&
+- length >=
+- sizeof(struct avdtp_media_codec_capability))
+- *codec = cap;
+- else if (category == AVDTP_DELAY_REPORTING && delay_reporting)
+- *delay_reporting = TRUE;
++ switch (cap->category) {
++ case AVDTP_MEDIA_CODEC:
++ if (codec)
++ *codec = cpy;
++ break;
++ case AVDTP_DELAY_REPORTING:
++ if (delay_reporting)
++ *delay_reporting = TRUE;
++ break;
++ }
+ }
+
+ return caps;
+@@ -1494,6 +1504,12 @@ static gboolean avdtp_setconf_cmd(struct avdtp *session, uint8_t transaction,
+ &stream->codec,
+ &stream->delay_reporting);
+
++ if (!stream->caps || !stream->codec) {
++ err = AVDTP_UNSUPPORTED_CONFIGURATION;
++ category = 0x00;
++ goto failed_stream;
++ }
++
+ /* Verify that the Media Transport capability's length = 0. Reject otherwise */
+ for (l = stream->caps; l != NULL; l = g_slist_next(l)) {
+ struct avdtp_service_capability *cap = l->data;
+diff --git a/profiles/audio/avrcp.c b/profiles/audio/avrcp.c
+index d9471c0..0233d53 100644
+--- a/profiles/audio/avrcp.c
++++ b/profiles/audio/avrcp.c
+@@ -1916,6 +1916,14 @@ static size_t handle_vendordep_pdu(struct avctp *conn, uint8_t transaction,
+ goto err_metadata;
+ }
+
++ operands += sizeof(*pdu);
++ operand_count -= sizeof(*pdu);
++
++ if (pdu->params_len != operand_count) {
++ DBG("AVRCP PDU parameters length don't match");
++ pdu->params_len = operand_count;
++ }
++
+ for (handler = session->control_handlers; handler->pdu_id; handler++) {
+ if (handler->pdu_id == pdu->pdu_id)
+ break;
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch b/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch
new file mode 100644
index 0000000000..43670ab2b3
--- /dev/null
+++ b/meta/recipes-connectivity/bluez5/bluez5/CVE-2023-45866.patch
@@ -0,0 +1,54 @@
+From 25a471a83e02e1effb15d5a488b3f0085eaeb675 Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Tue, 10 Oct 2023 13:03:12 -0700
+Subject: input.conf: Change default of ClassicBondedOnly
+
+This changes the default of ClassicBondedOnly since defaulting to false
+is not inline with HID specification which mandates the of Security Mode
+4:
+
+BLUETOOTH SPECIFICATION Page 84 of 123
+Human Interface Device (HID) Profile:
+
+5.4.3.4.2 Security Modes
+Bluetooth HID Hosts shall use Security Mode 4 when interoperating with
+Bluetooth HID devices that are compliant to the Bluetooth Core
+Specification v2.1+EDR[6].
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/bluetooth/bluez.git/commit/?id=25a471a83e02e1effb15d5a488b3f0085eaeb675]
+CVE: CVE-2023-45866
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ profiles/input/device.c | 2 +-
+ profiles/input/input.conf | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/profiles/input/device.c b/profiles/input/device.c
+index 375314e..0236488 100644
+--- a/profiles/input/device.c
++++ b/profiles/input/device.c
+@@ -93,7 +93,7 @@ struct input_device {
+
+ static int idle_timeout = 0;
+ static bool uhid_enabled = false;
+-static bool classic_bonded_only = false;
++static bool classic_bonded_only = true;
+
+ void input_set_idle_timeout(int timeout)
+ {
+diff --git a/profiles/input/input.conf b/profiles/input/input.conf
+index 4c70bc5..d8645f3 100644
+--- a/profiles/input/input.conf
++++ b/profiles/input/input.conf
+@@ -17,7 +17,7 @@
+ # platforms may want to make sure that input connections only come from bonded
+ # device connections. Several older mice have been known for not supporting
+ # pairing/encryption.
+-# Defaults to false to maximize device compatibility.
++# Defaults to true for security.
+ #ClassicBondedOnly=true
+
+ # LE upgrade security
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/bluez5/bluez5_5.55.bb b/meta/recipes-connectivity/bluez5/bluez5_5.55.bb
index e5353bd815..be74a35e0a 100644
--- a/meta/recipes-connectivity/bluez5/bluez5_5.55.bb
+++ b/meta/recipes-connectivity/bluez5/bluez5_5.55.bb
@@ -6,6 +6,13 @@ SRC_URI[sha256sum] = "8863717113c4897e2ad3271fc808ea245319e6fd95eed2e934fae8e089
# These issues have kernel fixes rather than bluez fixes so exclude here
CVE_CHECK_WHITELIST += "CVE-2020-12352 CVE-2020-24490"
+# Commit 7a80d2096f1b7125085e21448112aa02f49f5e9a, e2b0f0d8d63e1223bb714a9efb37e2257818268b
+# and 0388794dc5fdb73a4ea88bcf148de0a12b4364d4 to fix CVE-2022-39177
+# already backport in CVE-2022-39176.patch
+# https://bugs.launchpad.net/ubuntu/+source/bluez/+bug/1977968
+
+CVE_CHECK_WHITELIST += "CVE-2022-39177"
+
# noinst programs in Makefile.tools that are conditional on READLINE
# support
NOINST_TOOLS_READLINE ?= " \
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch
new file mode 100644
index 0000000000..74a739d6a2
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-32292.patch
@@ -0,0 +1,37 @@
+From d1a5ede5d255bde8ef707f8441b997563b9312bd Mon Sep 17 00:00:00 2001
+From: Nathan Crandall <ncrandall@tesla.com>
+Date: Tue, 12 Jul 2022 08:56:34 +0200
+Subject: gweb: Fix OOB write in received_data()
+
+There is a mismatch of handling binary vs. C-string data with memchr
+and strlen, resulting in pos, count, and bytes_read to become out of
+sync and result in a heap overflow. Instead, do not treat the buffer
+as an ASCII C-string. We calculate the count based on the return value
+of memchr, instead of strlen.
+
+Fixes: CVE-2022-32292
+
+Upstream-Status: Backport
+https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=d1a5ede5d255bde8ef707f8441b997563b9312bd
+CVE: CVE-2022-32292
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+---
+ gweb/gweb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gweb/gweb.c b/gweb/gweb.c
+index 12fcb1d8..13c6c5f2 100644
+--- a/gweb/gweb.c
++++ b/gweb/gweb.c
+@@ -918,7 +918,7 @@ static gboolean received_data(GIOChannel *channel, GIOCondition cond,
+ }
+
+ *pos = '\0';
+- count = strlen((char *) ptr);
++ count = pos - ptr;
+ if (count > 0 && ptr[count - 1] == '\r') {
+ ptr[--count] = '\0';
+ bytes_read--;
+--
+cgit
+
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2022-32293.patch b/meta/recipes-connectivity/connman/connman/CVE-2022-32293.patch
new file mode 100644
index 0000000000..83a013981c
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2022-32293.patch
@@ -0,0 +1,266 @@
+From 358a44b1442fae0f82846e10da0708b5c4e1ce27 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 20 Sep 2022 17:58:19 +0530
+Subject: [PATCH] CVE-2022-32293
+
+CVE: CVE-2022-32293
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/commit/?id=72343929836de80727a27d6744c869dff045757c && https://git.kernel.org/pub/scm/network/connman/connman.git/commit/src/wispr.c?id=416bfaff988882c553c672e5bfc2d4f648d29e8a]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/wispr.c | 83 ++++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 63 insertions(+), 20 deletions(-)
+
+diff --git a/src/wispr.c b/src/wispr.c
+index 473c0e0..97e0242 100644
+--- a/src/wispr.c
++++ b/src/wispr.c
+@@ -59,6 +59,7 @@ struct wispr_route {
+ };
+
+ struct connman_wispr_portal_context {
++ int refcount;
+ struct connman_service *service;
+ enum connman_ipconfig_type type;
+ struct connman_wispr_portal *wispr_portal;
+@@ -96,10 +97,13 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data);
+
+ static GHashTable *wispr_portal_list = NULL;
+
++#define wispr_portal_context_ref(wp_context) \
++ wispr_portal_context_ref_debug(wp_context, __FILE__, __LINE__, __func__)
++#define wispr_portal_context_unref(wp_context) \
++ wispr_portal_context_unref_debug(wp_context, __FILE__, __LINE__, __func__)
++
+ static void connman_wispr_message_init(struct connman_wispr_message *msg)
+ {
+- DBG("");
+-
+ msg->has_error = false;
+ msg->current_element = NULL;
+
+@@ -159,11 +163,6 @@ static void free_wispr_routes(struct connman_wispr_portal_context *wp_context)
+ static void free_connman_wispr_portal_context(
+ struct connman_wispr_portal_context *wp_context)
+ {
+- DBG("context %p", wp_context);
+-
+- if (!wp_context)
+- return;
+-
+ if (wp_context->wispr_portal) {
+ if (wp_context->wispr_portal->ipv4_context == wp_context)
+ wp_context->wispr_portal->ipv4_context = NULL;
+@@ -200,9 +199,38 @@ static void free_connman_wispr_portal_context(
+ g_free(wp_context);
+ }
+
++static struct connman_wispr_portal_context *
++wispr_portal_context_ref_debug(struct connman_wispr_portal_context *wp_context,
++ const char *file, int line, const char *caller)
++{
++ DBG("%p ref %d by %s:%d:%s()", wp_context,
++ wp_context->refcount + 1, file, line, caller);
++
++ __sync_fetch_and_add(&wp_context->refcount, 1);
++
++ return wp_context;
++}
++
++static void wispr_portal_context_unref_debug(
++ struct connman_wispr_portal_context *wp_context,
++ const char *file, int line, const char *caller)
++{
++ if (!wp_context)
++ return;
++
++ DBG("%p ref %d by %s:%d:%s()", wp_context,
++ wp_context->refcount - 1, file, line, caller);
++
++ if (__sync_fetch_and_sub(&wp_context->refcount, 1) != 1)
++ return;
++
++ free_connman_wispr_portal_context(wp_context);
++}
++
+ static struct connman_wispr_portal_context *create_wispr_portal_context(void)
+ {
+- return g_try_new0(struct connman_wispr_portal_context, 1);
++ return wispr_portal_context_ref(
++ g_new0(struct connman_wispr_portal_context, 1));
+ }
+
+ static void free_connman_wispr_portal(gpointer data)
+@@ -214,8 +242,8 @@ static void free_connman_wispr_portal(gpointer data)
+ if (!wispr_portal)
+ return;
+
+- free_connman_wispr_portal_context(wispr_portal->ipv4_context);
+- free_connman_wispr_portal_context(wispr_portal->ipv6_context);
++ wispr_portal_context_unref(wispr_portal->ipv4_context);
++ wispr_portal_context_unref(wispr_portal->ipv6_context);
+
+ g_free(wispr_portal);
+ }
+@@ -450,8 +478,6 @@ static void portal_manage_status(GWebResult *result,
+ &str))
+ connman_info("Client-Timezone: %s", str);
+
+- free_connman_wispr_portal_context(wp_context);
+-
+ __connman_service_ipconfig_indicate_state(service,
+ CONNMAN_SERVICE_STATE_ONLINE, type);
+ }
+@@ -509,14 +535,17 @@ static void wispr_portal_request_portal(
+ {
+ DBG("");
+
++ wispr_portal_context_ref(wp_context);
+ wp_context->request_id = g_web_request_get(wp_context->web,
+ wp_context->status_url,
+ wispr_portal_web_result,
+ wispr_route_request,
+ wp_context);
+
+- if (wp_context->request_id == 0)
++ if (wp_context->request_id == 0) {
+ wispr_portal_error(wp_context);
++ wispr_portal_context_unref(wp_context);
++ }
+ }
+
+ static bool wispr_input(const guint8 **data, gsize *length,
+@@ -562,13 +591,15 @@ static void wispr_portal_browser_reply_cb(struct connman_service *service,
+ return;
+
+ if (!authentication_done) {
+- wispr_portal_error(wp_context);
+ free_wispr_routes(wp_context);
++ wispr_portal_error(wp_context);
++ wispr_portal_context_unref(wp_context);
+ return;
+ }
+
+ /* Restarting the test */
+ __connman_service_wispr_start(service, wp_context->type);
++ wispr_portal_context_unref(wp_context);
+ }
+
+ static void wispr_portal_request_wispr_login(struct connman_service *service,
+@@ -592,7 +623,7 @@ static void wispr_portal_request_wispr_login(struct connman_service *service,
+ return;
+ }
+
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+ return;
+ }
+
+@@ -644,11 +675,13 @@ static bool wispr_manage_message(GWebResult *result,
+
+ wp_context->wispr_result = CONNMAN_WISPR_RESULT_LOGIN;
+
++ wispr_portal_context_ref(wp_context);
+ if (__connman_agent_request_login_input(wp_context->service,
+ wispr_portal_request_wispr_login,
+- wp_context) != -EINPROGRESS)
++ wp_context) != -EINPROGRESS) {
+ wispr_portal_error(wp_context);
+- else
++ wispr_portal_context_unref(wp_context);
++ } else
+ return true;
+
+ break;
+@@ -697,6 +730,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ if (length > 0) {
+ g_web_parser_feed_data(wp_context->wispr_parser,
+ chunk, length);
++ wispr_portal_context_unref(wp_context);
+ return true;
+ }
+
+@@ -714,6 +748,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ switch (status) {
+ case 000:
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -725,11 +760,14 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ if (g_web_result_get_header(result, "X-ConnMan-Status",
+ &str)) {
+ portal_manage_status(result, wp_context);
++ wispr_portal_context_unref(wp_context);
+ return false;
+- } else
++ } else {
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->redirect_url, wp_context);
++ }
+
+ break;
+ case 302:
+@@ -737,6 +775,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ !g_web_result_get_header(result, "Location",
+ &redirect)) {
+
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -747,6 +786,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ wp_context->redirect_url = g_strdup(redirect);
+
++ wispr_portal_context_ref(wp_context);
+ wp_context->request_id = g_web_request_get(wp_context->web,
+ redirect, wispr_portal_web_result,
+ wispr_route_request, wp_context);
+@@ -763,6 +803,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+
+ break;
+ case 505:
++ wispr_portal_context_ref(wp_context);
+ __connman_agent_request_browser(wp_context->service,
+ wispr_portal_browser_reply_cb,
+ wp_context->status_url, wp_context);
+@@ -775,6 +816,7 @@ static bool wispr_portal_web_result(GWebResult *result, gpointer user_data)
+ wp_context->request_id = 0;
+ done:
+ wp_context->wispr_msg.message_type = -1;
++ wispr_portal_context_unref(wp_context);
+ return false;
+ }
+
+@@ -809,6 +851,7 @@ static void proxy_callback(const char *proxy, void *user_data)
+ xml_wispr_parser_callback, wp_context);
+
+ wispr_portal_request_portal(wp_context);
++ wispr_portal_context_unref(wp_context);
+ }
+
+ static gboolean no_proxy_callback(gpointer user_data)
+@@ -903,7 +946,7 @@ static int wispr_portal_detect(struct connman_wispr_portal_context *wp_context)
+
+ if (wp_context->token == 0) {
+ err = -EINVAL;
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+ }
+ } else if (wp_context->timeout == 0) {
+ wp_context->timeout = g_idle_add(no_proxy_callback, wp_context);
+@@ -952,7 +995,7 @@ int __connman_wispr_start(struct connman_service *service,
+
+ /* If there is already an existing context, we wipe it */
+ if (wp_context)
+- free_connman_wispr_portal_context(wp_context);
++ wispr_portal_context_unref(wp_context);
+
+ wp_context = create_wispr_portal_context();
+ if (!wp_context)
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch b/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch
new file mode 100644
index 0000000000..ea1601cc04
--- /dev/null
+++ b/meta/recipes-connectivity/connman/connman/CVE-2023-28488.patch
@@ -0,0 +1,54 @@
+From 99e2c16ea1cced34a5dc450d76287a1c3e762138 Mon Sep 17 00:00:00 2001
+From: Daniel Wagner <wagi@monom.org>
+Date: Tue, 11 Apr 2023 08:12:56 +0200
+Subject: gdhcp: Verify and sanitize packet length first
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/patch/?id=99e2c16ea1cced34a5dc450d76287a1c3e762138]
+CVE: CVE-2023-28488
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ gdhcp/client.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/gdhcp/client.c b/gdhcp/client.c
+index 7efa7e45..82017692 100644
+--- a/gdhcp/client.c
++++ b/gdhcp/client.c
+@@ -1319,9 +1319,9 @@ static bool sanity_check(struct ip_udp_dhcp_packet *packet, int bytes)
+ static int dhcp_recv_l2_packet(struct dhcp_packet *dhcp_pkt, int fd,
+ struct sockaddr_in *dst_addr)
+ {
+- int bytes;
+ struct ip_udp_dhcp_packet packet;
+ uint16_t check;
++ int bytes, tot_len;
+
+ memset(&packet, 0, sizeof(packet));
+
+@@ -1329,15 +1329,17 @@ static int dhcp_recv_l2_packet(struct dhcp_packet *dhcp_pkt, int fd,
+ if (bytes < 0)
+ return -1;
+
+- if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp)))
+- return -1;
+-
+- if (bytes < ntohs(packet.ip.tot_len))
++ tot_len = ntohs(packet.ip.tot_len);
++ if (bytes > tot_len) {
++ /* ignore any extra garbage bytes */
++ bytes = tot_len;
++ } else if (bytes < tot_len) {
+ /* packet is bigger than sizeof(packet), we did partial read */
+ return -1;
++ }
+
+- /* ignore any extra garbage bytes */
+- bytes = ntohs(packet.ip.tot_len);
++ if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp)))
++ return -1;
+
+ if (!sanity_check(&packet, bytes))
+ return -1;
+--
+cgit
+
diff --git a/meta/recipes-connectivity/connman/connman_1.37.bb b/meta/recipes-connectivity/connman/connman_1.37.bb
index bdd1e590ec..8062a094d3 100644
--- a/meta/recipes-connectivity/connman/connman_1.37.bb
+++ b/meta/recipes-connectivity/connman/connman_1.37.bb
@@ -12,6 +12,9 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/network/${BPN}/${BP}.tar.xz \
file://CVE-2021-33833.patch \
file://CVE-2022-23096-7.patch \
file://CVE-2022-23098.patch \
+ file://CVE-2022-32292.patch \
+ file://CVE-2022-32293.patch \
+ file://CVE-2023-28488.patch \
"
SRC_URI_append_libc-musl = " file://0002-resolve-musl-does-not-implement-res_ninit.patch"
diff --git a/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2928.patch b/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2928.patch
new file mode 100644
index 0000000000..11f162cbda
--- /dev/null
+++ b/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2928.patch
@@ -0,0 +1,120 @@
+From 8a5d739eea10ee6e193f053b1662142d5657cbc6 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 6 Oct 2022 09:39:18 +0530
+Subject: [PATCH] CVE-2022-2928
+
+Upstream-Status: Backport [https://downloads.isc.org/isc/dhcp/4.4.3-P1/patches/]
+CVE: CVE-2022-2928
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ common/options.c | 7 +++++
+ common/tests/option_unittest.c | 54 ++++++++++++++++++++++++++++++++++
+ 2 files changed, 61 insertions(+)
+
+diff --git a/common/options.c b/common/options.c
+index a7ed84c..4e53bb4 100644
+--- a/common/options.c
++++ b/common/options.c
+@@ -4452,6 +4452,8 @@ add_option(struct option_state *options,
+ if (!option_cache_allocate(&oc, MDL)) {
+ log_error("No memory for option cache adding %s (option %d).",
+ option->name, option_num);
++ /* Get rid of reference created during hash lookup. */
++ option_dereference(&option, MDL);
+ return 0;
+ }
+
+@@ -4463,6 +4465,8 @@ add_option(struct option_state *options,
+ MDL)) {
+ log_error("No memory for constant data adding %s (option %d).",
+ option->name, option_num);
++ /* Get rid of reference created during hash lookup. */
++ option_dereference(&option, MDL);
+ option_cache_dereference(&oc, MDL);
+ return 0;
+ }
+@@ -4471,6 +4475,9 @@ add_option(struct option_state *options,
+ save_option(&dhcp_universe, options, oc);
+ option_cache_dereference(&oc, MDL);
+
++ /* Get rid of reference created during hash lookup. */
++ option_dereference(&option, MDL);
++
+ return 1;
+ }
+
+diff --git a/common/tests/option_unittest.c b/common/tests/option_unittest.c
+index cd52cfb..690704d 100644
+--- a/common/tests/option_unittest.c
++++ b/common/tests/option_unittest.c
+@@ -130,6 +130,59 @@ ATF_TC_BODY(pretty_print_option, tc)
+ }
+
+
++ATF_TC(add_option_ref_cnt);
++
++ATF_TC_HEAD(add_option_ref_cnt, tc)
++{
++ atf_tc_set_md_var(tc, "descr",
++ "Verify add_option() does not leak option ref counts.");
++}
++
++ATF_TC_BODY(add_option_ref_cnt, tc)
++{
++ struct option_state *options = NULL;
++ struct option *option = NULL;
++ unsigned int cid_code = DHO_DHCP_CLIENT_IDENTIFIER;
++ char *cid_str = "1234";
++ int refcnt_before = 0;
++
++ // Look up the option we're going to add.
++ initialize_common_option_spaces();
++ if (!option_code_hash_lookup(&option, dhcp_universe.code_hash,
++ &cid_code, 0, MDL)) {
++ atf_tc_fail("cannot find option definition?");
++ }
++
++ // Get the option's reference count before we call add_options.
++ refcnt_before = option->refcnt;
++
++ // Allocate a option_state to which to add an option.
++ if (!option_state_allocate(&options, MDL)) {
++ atf_tc_fail("cannot allocat options state");
++ }
++
++ // Call add_option() to add the option to the option state.
++ if (!add_option(options, cid_code, cid_str, strlen(cid_str))) {
++ atf_tc_fail("add_option returned 0");
++ }
++
++ // Verify that calling add_option() only adds 1 to the option ref count.
++ if (option->refcnt != (refcnt_before + 1)) {
++ atf_tc_fail("after add_option(), count is wrong, before %d, after: %d",
++ refcnt_before, option->refcnt);
++ }
++
++ // Derefrence the option_state, this should reduce the ref count to
++ // it's starting value.
++ option_state_dereference(&options, MDL);
++
++ // Verify that dereferencing option_state restores option ref count.
++ if (option->refcnt != refcnt_before) {
++ atf_tc_fail("after state deref, count is wrong, before %d, after: %d",
++ refcnt_before, option->refcnt);
++ }
++}
++
+ /* This macro defines main() method that will call specified
+ test cases. tp and simple_test_case names can be whatever you want
+ as long as it is a valid variable identifier. */
+@@ -137,6 +190,7 @@ ATF_TP_ADD_TCS(tp)
+ {
+ ATF_TP_ADD_TC(tp, option_refcnt);
+ ATF_TP_ADD_TC(tp, pretty_print_option);
++ ATF_TP_ADD_TC(tp, add_option_ref_cnt);
+
+ return (atf_no_error());
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2929.patch b/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2929.patch
new file mode 100644
index 0000000000..d605204f89
--- /dev/null
+++ b/meta/recipes-connectivity/dhcp/dhcp/CVE-2022-2929.patch
@@ -0,0 +1,40 @@
+From 5c959166ebee7605e2048de573f2475b4d731ff7 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 6 Oct 2022 09:42:59 +0530
+Subject: [PATCH] CVE-2022-2929
+
+Upstream-Status: Backport [https://downloads.isc.org/isc/dhcp/4.4.3-P1/patches/]
+CVE: CVE-2022-2929
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ common/options.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/common/options.c b/common/options.c
+index 4e53bb4..28800fc 100644
+--- a/common/options.c
++++ b/common/options.c
+@@ -454,16 +454,16 @@ int fqdn_universe_decode (struct option_state *options,
+ while (s < &bp -> data[0] + length + 2) {
+ len = *s;
+ if (len > 63) {
+- log_info ("fancy bits in fqdn option");
+- return 0;
++ log_info ("label length exceeds 63 in fqdn option");
++ goto bad;
+ }
+ if (len == 0) {
+ terminated = 1;
+ break;
+ }
+ if (s + len > &bp -> data [0] + length + 3) {
+- log_info ("fqdn tag longer than buffer");
+- return 0;
++ log_info ("fqdn label longer than buffer");
++ goto bad;
+ }
+
+ if (first_len == 0) {
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb b/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb
index 5609a350cc..d3c87d0d07 100644
--- a/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb
+++ b/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb
@@ -11,6 +11,8 @@ SRC_URI += "file://0001-define-macro-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.pat
file://0013-fixup_use_libbind.patch \
file://0001-workaround-busybox-limitation-in-linux-dhclient-script.patch \
file://CVE-2021-25217.patch \
+ file://CVE-2022-2928.patch \
+ file://CVE-2022-2929.patch \
"
SRC_URI[md5sum] = "2afdaf8498dc1edaf3012efdd589b3e1"
diff --git a/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch b/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch
new file mode 100644
index 0000000000..aea07bd803
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch
@@ -0,0 +1,283 @@
+From 703418fe9d2e3b1e8d594df5788d8001a8116265 Mon Sep 17 00:00:00 2001
+From: Jeffrey Bencteux <jeffbencteux@gmail.com>
+Date: Fri, 30 Jun 2023 19:02:45 +0200
+Subject: [PATCH] CVE-2023-40303: ftpd,rcp,rlogin,rsh,rshd,uucpd: fix: check
+ set*id() return values
+
+Several setuid(), setgid(), seteuid() and setguid() return values
+were not checked in ftpd/rcp/rlogin/rsh/rshd/uucpd code potentially
+leading to potential security issues.
+
+CVE: CVE-2023-40303
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=e4e65c03f4c11292a3e40ef72ca3f194c8bffdd6]
+Signed-off-by: Jeffrey Bencteux <jeffbencteux@gmail.com>
+Signed-off-by: Simon Josefsson <simon@josefsson.org>
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ftpd/ftpd.c | 10 +++++++---
+ src/rcp.c | 39 +++++++++++++++++++++++++++++++++------
+ src/rlogin.c | 11 +++++++++--
+ src/rsh.c | 25 +++++++++++++++++++++----
+ src/rshd.c | 20 +++++++++++++++++---
+ src/uucpd.c | 15 +++++++++++++--
+ 6 files changed, 100 insertions(+), 20 deletions(-)
+
+diff --git a/ftpd/ftpd.c b/ftpd/ftpd.c
+index 5db88d0..b52b122 100644
+--- a/ftpd/ftpd.c
++++ b/ftpd/ftpd.c
+@@ -862,7 +862,9 @@ end_login (struct credentials *pcred)
+ char *remotehost = pcred->remotehost;
+ int atype = pcred->auth_type;
+
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
++
+ if (pcred->logged_in)
+ {
+ logwtmp_keep_open (ttyline, "", "");
+@@ -1151,7 +1153,8 @@ getdatasock (const char *mode)
+
+ if (data >= 0)
+ return fdopen (data, mode);
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
+ s = socket (ctrl_addr.ss_family, SOCK_STREAM, 0);
+ if (s < 0)
+ goto bad;
+@@ -1978,7 +1981,8 @@ passive (int epsv, int af)
+ else /* !AF_INET6 */
+ ((struct sockaddr_in *) &pasv_addr)->sin_port = 0;
+
+- seteuid ((uid_t) 0);
++ if (seteuid ((uid_t) 0) == -1)
++ _exit (EXIT_FAILURE);
+ if (bind (pdata, (struct sockaddr *) &pasv_addr, pasv_addrlen) < 0)
+ {
+ if (seteuid ((uid_t) cred.uid))
+diff --git a/src/rcp.c b/src/rcp.c
+index bafa35f..366295c 100644
+--- a/src/rcp.c
++++ b/src/rcp.c
+@@ -347,14 +347,23 @@ main (int argc, char *argv[])
+ if (from_option)
+ { /* Follow "protocol", send data. */
+ response ();
+- setuid (userid);
++
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ source (argc, argv);
+ exit (errs);
+ }
+
+ if (to_option)
+ { /* Receive data. */
+- setuid (userid);
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ sink (argc, argv);
+ exit (errs);
+ }
+@@ -539,7 +548,11 @@ toremote (char *targ, int argc, char *argv[])
+ if (response () < 0)
+ exit (EXIT_FAILURE);
+ free (bp);
+- setuid (userid);
++
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+ }
+ source (1, argv + i);
+ close (rem);
+@@ -634,7 +647,12 @@ tolocal (int argc, char *argv[])
+ ++errs;
+ continue;
+ }
+- seteuid (userid);
++
++ if (seteuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
+ #if defined IP_TOS && defined IPPROTO_IP && defined IPTOS_THROUGHPUT
+ sslen = sizeof (ss);
+ (void) getpeername (rem, (struct sockaddr *) &ss, &sslen);
+@@ -647,7 +665,12 @@ tolocal (int argc, char *argv[])
+ #endif
+ vect[0] = target;
+ sink (1, vect);
+- seteuid (effuid);
++
++ if (seteuid (effuid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
+ close (rem);
+ rem = -1;
+ #ifdef SHISHI
+@@ -1453,7 +1476,11 @@ susystem (char *s, int userid)
+ return (127);
+
+ case 0:
+- setuid (userid);
++ if (setuid (userid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
++
+ execl (PATH_BSHELL, "sh", "-c", s, NULL);
+ _exit (127);
+ }
+diff --git a/src/rlogin.c b/src/rlogin.c
+index e5e11a7..6b38901 100644
+--- a/src/rlogin.c
++++ b/src/rlogin.c
+@@ -649,8 +649,15 @@ try_connect:
+ /* Now change to the real user ID. We have to be set-user-ID root
+ to get the privileged port that rcmd () uses. We now want, however,
+ to run as the real user who invoked us. */
+- seteuid (uid);
+- setuid (uid);
++ if (seteuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
++
++ if (setuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+
+ doit (&osmask); /* The old mask will activate SIGURG and SIGUSR1! */
+
+diff --git a/src/rsh.c b/src/rsh.c
+index bd70372..b451a70 100644
+--- a/src/rsh.c
++++ b/src/rsh.c
+@@ -278,8 +278,17 @@ main (int argc, char **argv)
+ {
+ if (asrsh)
+ *argv = (char *) "rlogin";
+- seteuid (getuid ());
+- setuid (getuid ());
++
++ if (seteuid (getuid ()) == -1)
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
++
++ if (setuid (getuid ()) == -1)
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
++
+ execv (PATH_RLOGIN, argv);
+ error (EXIT_FAILURE, errno, "cannot execute %s", PATH_RLOGIN);
+ }
+@@ -543,8 +552,16 @@ try_connect:
+ error (0, errno, "setsockopt DEBUG (ignored)");
+ }
+
+- seteuid (uid);
+- setuid (uid);
++ if (seteuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
++
++ if (setuid (uid) == -1)
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
++
+ #ifdef HAVE_SIGACTION
+ sigemptyset (&sigs);
+ sigaddset (&sigs, SIGINT);
+diff --git a/src/rshd.c b/src/rshd.c
+index b824a10..8cdcd06 100644
+--- a/src/rshd.c
++++ b/src/rshd.c
+@@ -1848,8 +1848,18 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ pwd->pw_shell = PATH_BSHELL;
+
+ /* Set the gid, then uid to become the user specified by "locuser" */
+- setegid ((gid_t) pwd->pw_gid);
+- setgid ((gid_t) pwd->pw_gid);
++ if (setegid ((gid_t) pwd->pw_gid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setegid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
++
++ if (setgid ((gid_t) pwd->pw_gid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setgid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
++
+ #ifdef HAVE_INITGROUPS
+ initgroups (pwd->pw_name, pwd->pw_gid); /* BSD groups */
+ #endif
+@@ -1871,7 +1881,11 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ }
+ #endif /* WITH_PAM */
+
+- setuid ((uid_t) pwd->pw_uid);
++ if (setuid ((uid_t) pwd->pw_uid) == -1)
++ {
++ rshd_error ("Cannot drop privileges (setuid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ /* We'll execute the client's command in the home directory
+ * of locuser. Note, that the chdir must be executed after
+diff --git a/src/uucpd.c b/src/uucpd.c
+index 55c3d44..6aba294 100644
+--- a/src/uucpd.c
++++ b/src/uucpd.c
+@@ -254,7 +254,12 @@ doit (struct sockaddr *sap, socklen_t salen)
+ sprintf (Username, "USER=%s", user);
+ sprintf (Logname, "LOGNAME=%s", user);
+ dologin (pw, sap, salen);
+- setgid (pw->pw_gid);
++
++ if (setgid (pw->pw_gid) == -1)
++ {
++ fprintf (stderr, "setgid() failed");
++ return;
++ }
+ #ifdef HAVE_INITGROUPS
+ initgroups (pw->pw_name, pw->pw_gid);
+ #endif
+@@ -263,7 +268,13 @@ doit (struct sockaddr *sap, socklen_t salen)
+ fprintf (stderr, "Login incorrect.");
+ return;
+ }
+- setuid (pw->pw_uid);
++
++ if (setuid (pw->pw_uid) == -1)
++ {
++ fprintf (stderr, "setuid() failed");
++ return;
++ }
++
+ execl (uucico_location, "uucico", NULL);
+ perror ("uucico server: execl");
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch b/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch
new file mode 100644
index 0000000000..4bc354d256
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch
@@ -0,0 +1,254 @@
+From 70fe022f9dac760eaece0228cad17e3d29a57fb8 Mon Sep 17 00:00:00 2001
+From: Simon Josefsson <simon@josefsson.org>
+Date: Mon, 31 Jul 2023 13:59:05 +0200
+Subject: [PATCH] CVE-2023-40303: Indent changes in previous commit.
+
+CVE: CVE-2023-40303
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=9122999252c7e21eb7774de11d539748e7bdf46d]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/rcp.c | 42 ++++++++++++++++++++++++------------------
+ src/rlogin.c | 12 ++++++------
+ src/rsh.c | 24 ++++++++++++------------
+ src/rshd.c | 24 ++++++++++++------------
+ src/uucpd.c | 16 ++++++++--------
+ 5 files changed, 62 insertions(+), 56 deletions(-)
+
+diff --git a/src/rcp.c b/src/rcp.c
+index cdcf8500..652f22e6 100644
+--- a/src/rcp.c
++++ b/src/rcp.c
+@@ -347,9 +347,10 @@ main (int argc, char *argv[])
+ response ();
+
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ source (argc, argv);
+ exit (errs);
+@@ -358,9 +359,10 @@ main (int argc, char *argv[])
+ if (to_option)
+ { /* Receive data. */
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ sink (argc, argv);
+ exit (errs);
+@@ -548,9 +550,10 @@ toremote (char *targ, int argc, char *argv[])
+ free (bp);
+
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+ }
+ source (1, argv + i);
+ close (rem);
+@@ -645,9 +648,10 @@ tolocal (int argc, char *argv[])
+ }
+
+ if (seteuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (seteuid() failed)");
++ }
+
+ #if defined IP_TOS && defined IPPROTO_IP && defined IPTOS_THROUGHPUT
+ sslen = sizeof (ss);
+@@ -663,9 +667,10 @@ tolocal (int argc, char *argv[])
+ sink (1, vect);
+
+ if (seteuid (effuid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (seteuid() failed)");
++ }
+
+ close (rem);
+ rem = -1;
+@@ -1465,9 +1470,10 @@ susystem (char *s, int userid)
+
+ case 0:
+ if (setuid (userid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0,
++ "Could not drop privileges (setuid() failed)");
++ }
+
+ execl (PATH_BSHELL, "sh", "-c", s, NULL);
+ _exit (127);
+diff --git a/src/rlogin.c b/src/rlogin.c
+index c543de0c..4360202f 100644
+--- a/src/rlogin.c
++++ b/src/rlogin.c
+@@ -648,14 +648,14 @@ try_connect:
+ to get the privileged port that rcmd () uses. We now want, however,
+ to run as the real user who invoked us. */
+ if (seteuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (seteuid() failed)");
++ }
+
+ if (setuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
+- }
++ {
++ error (EXIT_FAILURE, 0, "Could not drop privileges (setuid() failed)");
++ }
+
+ doit (&osmask); /* The old mask will activate SIGURG and SIGUSR1! */
+
+diff --git a/src/rsh.c b/src/rsh.c
+index 6f60667d..179b47cd 100644
+--- a/src/rsh.c
++++ b/src/rsh.c
+@@ -278,14 +278,14 @@ main (int argc, char **argv)
+ *argv = (char *) "rlogin";
+
+ if (seteuid (getuid ()) == -1)
+- {
+- error (EXIT_FAILURE, errno, "seteuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
+
+ if (setuid (getuid ()) == -1)
+- {
+- error (EXIT_FAILURE, errno, "setuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
+
+ execv (PATH_RLOGIN, argv);
+ error (EXIT_FAILURE, errno, "cannot execute %s", PATH_RLOGIN);
+@@ -551,14 +551,14 @@ try_connect:
+ }
+
+ if (seteuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, errno, "seteuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "seteuid() failed");
++ }
+
+ if (setuid (uid) == -1)
+- {
+- error (EXIT_FAILURE, errno, "setuid() failed");
+- }
++ {
++ error (EXIT_FAILURE, errno, "setuid() failed");
++ }
+
+ #ifdef HAVE_SIGACTION
+ sigemptyset (&sigs);
+diff --git a/src/rshd.c b/src/rshd.c
+index 707790e7..3a153a18 100644
+--- a/src/rshd.c
++++ b/src/rshd.c
+@@ -1848,16 +1848,16 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+
+ /* Set the gid, then uid to become the user specified by "locuser" */
+ if (setegid ((gid_t) pwd->pw_gid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setegid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setegid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ if (setgid ((gid_t) pwd->pw_gid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setgid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setgid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ #ifdef HAVE_INITGROUPS
+ initgroups (pwd->pw_name, pwd->pw_gid); /* BSD groups */
+@@ -1881,10 +1881,10 @@ doit (int sockfd, struct sockaddr *fromp, socklen_t fromlen)
+ #endif /* WITH_PAM */
+
+ if (setuid ((uid_t) pwd->pw_uid) == -1)
+- {
+- rshd_error ("Cannot drop privileges (setuid() failed)\n");
+- exit (EXIT_FAILURE);
+- }
++ {
++ rshd_error ("Cannot drop privileges (setuid() failed)\n");
++ exit (EXIT_FAILURE);
++ }
+
+ /* We'll execute the client's command in the home directory
+ * of locuser. Note, that the chdir must be executed after
+diff --git a/src/uucpd.c b/src/uucpd.c
+index 29cfce35..fde7b9c9 100644
+--- a/src/uucpd.c
++++ b/src/uucpd.c
+@@ -254,10 +254,10 @@ doit (struct sockaddr *sap, socklen_t salen)
+ dologin (pw, sap, salen);
+
+ if (setgid (pw->pw_gid) == -1)
+- {
+- fprintf (stderr, "setgid() failed");
+- return;
+- }
++ {
++ fprintf (stderr, "setgid() failed");
++ return;
++ }
+ #ifdef HAVE_INITGROUPS
+ initgroups (pw->pw_name, pw->pw_gid);
+ #endif
+@@ -268,10 +268,10 @@ doit (struct sockaddr *sap, socklen_t salen)
+ }
+
+ if (setuid (pw->pw_uid) == -1)
+- {
+- fprintf (stderr, "setuid() failed");
+- return;
+- }
++ {
++ fprintf (stderr, "setuid() failed");
++ return;
++ }
+
+ execl (uucico_location, "uucico", NULL);
+ perror ("uucico server: execl");
diff --git a/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch b/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch
new file mode 100644
index 0000000000..da2da8da8a
--- /dev/null
+++ b/meta/recipes-connectivity/inetutils/inetutils/CVE-2022-39028.patch
@@ -0,0 +1,54 @@
+From eaae65aac967f9628787dca4a2501ca860bb6598 Mon Sep 17 00:00:00 2001
+From: Minjae Kim <flowergom@gmail.com>
+Date: Mon, 26 Sep 2022 22:05:07 +0200
+Subject: [PATCH] telnetd: Handle early IAC EC or IAC EL receipt
+
+Fix telnetd crash if the first two bytes of a new connection
+are 0xff 0xf7 (IAC EC) or 0xff 0xf8 (IAC EL).
+
+The problem was reported in:
+<https://pierrekim.github.io/blog/2022-08-24-2-byte-dos-freebsd-netbsd-telnetd-netkit-telnetd-inetutils-telnetd-kerberos-telnetd.html>.
+
+* NEWS: Mention fix.
+* telnetd/state.c (telrcv): Handle zero slctab[SLC_EC].sptr and
+zero slctab[SLC_EL].sptr.
+
+CVE: CVE-2022-39028
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/inetutils.git/commit/?id=fae8263e467380483c28513c0e5fac143e46f94f]
+Signed-off-by: Minjae Kim<flowergom@gmail.com>
+---
+ telnetd/state.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/telnetd/state.c b/telnetd/state.c
+index 2184bca..7948503 100644
+--- a/telnetd/state.c
++++ b/telnetd/state.c
+@@ -314,15 +314,21 @@ telrcv (void)
+ case EC:
+ case EL:
+ {
+- cc_t ch;
++ cc_t ch = (cc_t) (_POSIX_VDISABLE);
+
+ DEBUG (debug_options, 1, printoption ("td: recv IAC", c));
+ ptyflush (); /* half-hearted */
+ init_termbuf ();
+ if (c == EC)
+- ch = *slctab[SLC_EC].sptr;
++ {
++ if (slctab[SLC_EC].sptr)
++ ch = *slctab[SLC_EC].sptr;
++ }
+ else
+- ch = *slctab[SLC_EL].sptr;
++ {
++ if (slctab[SLC_EL].sptr)
++ ch = *slctab[SLC_EL].sptr;
++ }
+ if (ch != (cc_t) (_POSIX_VDISABLE))
+ pty_output_byte ((unsigned char) ch);
+ break;
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb b/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
index f4450e19f4..3a68b34825 100644
--- a/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
+++ b/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb
@@ -24,6 +24,9 @@ SRC_URI = "${GNU_MIRROR}/inetutils/inetutils-${PV}.tar.gz \
file://0001-rcp-fix-to-work-with-large-files.patch \
file://fix-buffer-fortify-tfpt.patch \
file://CVE-2021-40491.patch \
+ file://CVE-2022-39028.patch \
+ file://0001-CVE-2023-40303-ftpd-rcp-rlogin-rsh-rshd-uucpd-fix-ch.patch \
+ file://0002-CVE-2023-40303-Indent-changes-in-previous-commit.patch \
"
SRC_URI[md5sum] = "04852c26c47cc8c6b825f2b74f191f52"
diff --git a/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb b/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
index 781b9216c5..a4030b7b32 100644
--- a/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
+++ b/meta/recipes-connectivity/mobile-broadband-provider-info/mobile-broadband-provider-info_git.bb
@@ -5,8 +5,8 @@ SECTION = "network"
LICENSE = "PD"
LIC_FILES_CHKSUM = "file://COPYING;md5=87964579b2a8ece4bc6744d2dc9a8b04"
-SRCREV = "4cbb44a9fe26aa6f0b28beb79f9488b37c097b5e"
-PV = "20220315"
+SRCREV = "aae7c68671d225e6d35224613d5b98192b9b2ffe"
+PV = "20230416"
PE = "1"
SRC_URI = "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https;branch=main"
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-01.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-01.patch
new file mode 100644
index 0000000000..c899056337
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-01.patch
@@ -0,0 +1,189 @@
+From f6213e03887237714eb5bcfc9089c707069f87c5 Mon Sep 17 00:00:00 2001
+From: Damien Miller <djm@mindrot.org>
+Date: Fri, 1 Oct 2021 16:35:49 +1000
+Subject: [PATCH 01/12] make OPENSSL_HAS_ECC checks more thorough
+
+ok dtucker
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/dee22129bbc61e25b1003adfa2bc584c5406ef2d]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-pkcs11-client.c | 16 ++++++++--------
+ ssh-pkcs11.c | 26 +++++++++++++-------------
+ 2 files changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/ssh-pkcs11-client.c b/ssh-pkcs11-client.c
+index 8a0ffef..41114c7 100644
+--- a/ssh-pkcs11-client.c
++++ b/ssh-pkcs11-client.c
+@@ -163,7 +163,7 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ return (ret);
+ }
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static ECDSA_SIG *
+ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ const BIGNUM *rp, EC_KEY *ec)
+@@ -220,12 +220,12 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ sshbuf_free(msg);
+ return (ret);
+ }
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ static RSA_METHOD *helper_rsa;
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static EC_KEY_METHOD *helper_ecdsa;
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ /* redirect private key crypto operations to the ssh-pkcs11-helper */
+ static void
+@@ -233,10 +233,10 @@ wrap_key(struct sshkey *k)
+ {
+ if (k->type == KEY_RSA)
+ RSA_set_method(k->rsa, helper_rsa);
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ else if (k->type == KEY_ECDSA)
+ EC_KEY_set_method(k->ecdsa, helper_ecdsa);
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+ else
+ fatal("%s: unknown key type", __func__);
+ }
+@@ -247,7 +247,7 @@ pkcs11_start_helper_methods(void)
+ if (helper_rsa != NULL)
+ return (0);
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ int (*orig_sign)(int, const unsigned char *, int, unsigned char *,
+ unsigned int *, const BIGNUM *, const BIGNUM *, EC_KEY *) = NULL;
+ if (helper_ecdsa != NULL)
+@@ -257,7 +257,7 @@ pkcs11_start_helper_methods(void)
+ return (-1);
+ EC_KEY_METHOD_get_sign(helper_ecdsa, &orig_sign, NULL, NULL);
+ EC_KEY_METHOD_set_sign(helper_ecdsa, orig_sign, NULL, ecdsa_do_sign);
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ if ((helper_rsa = RSA_meth_dup(RSA_get_default_method())) == NULL)
+ fatal("%s: RSA_meth_dup failed", __func__);
+diff --git a/ssh-pkcs11.c b/ssh-pkcs11.c
+index a302c79..b56a41b 100644
+--- a/ssh-pkcs11.c
++++ b/ssh-pkcs11.c
+@@ -78,7 +78,7 @@ struct pkcs11_key {
+
+ int pkcs11_interactive = 0;
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static void
+ ossl_error(const char *msg)
+ {
+@@ -89,7 +89,7 @@ ossl_error(const char *msg)
+ error("%s: libcrypto error: %.100s", __func__,
+ ERR_error_string(e, NULL));
+ }
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ int
+ pkcs11_init(int interactive)
+@@ -190,10 +190,10 @@ pkcs11_del_provider(char *provider_id)
+
+ static RSA_METHOD *rsa_method;
+ static int rsa_idx = 0;
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static EC_KEY_METHOD *ec_key_method;
+ static int ec_key_idx = 0;
+-#endif
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ /* release a wrapped object */
+ static void
+@@ -492,7 +492,7 @@ pkcs11_rsa_wrap(struct pkcs11_provider *provider, CK_ULONG slotidx,
+ return (0);
+ }
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ /* openssl callback doing the actual signing operation */
+ static ECDSA_SIG *
+ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+@@ -604,7 +604,7 @@ pkcs11_ecdsa_wrap(struct pkcs11_provider *provider, CK_ULONG slotidx,
+
+ return (0);
+ }
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ /* remove trailing spaces */
+ static void
+@@ -679,7 +679,7 @@ pkcs11_key_included(struct sshkey ***keysp, int *nkeys, struct sshkey *key)
+ return (0);
+ }
+
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ static struct sshkey *
+ pkcs11_fetch_ecdsa_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+ CK_OBJECT_HANDLE *obj)
+@@ -802,7 +802,7 @@ fail:
+
+ return (key);
+ }
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+ static struct sshkey *
+ pkcs11_fetch_rsa_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+@@ -910,7 +910,7 @@ pkcs11_fetch_x509_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+ #endif
+ struct sshkey *key = NULL;
+ int i;
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ int nid;
+ #endif
+ const u_char *cp;
+@@ -999,7 +999,7 @@ pkcs11_fetch_x509_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+ key->type = KEY_RSA;
+ key->flags |= SSHKEY_FLAG_EXT;
+ rsa = NULL; /* now owned by key */
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ } else if (EVP_PKEY_base_id(evp) == EVP_PKEY_EC) {
+ if (EVP_PKEY_get0_EC_KEY(evp) == NULL) {
+ error("invalid x509; no ec key");
+@@ -1030,7 +1030,7 @@ pkcs11_fetch_x509_pubkey(struct pkcs11_provider *p, CK_ULONG slotidx,
+ key->type = KEY_ECDSA;
+ key->flags |= SSHKEY_FLAG_EXT;
+ ec = NULL; /* now owned by key */
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+ } else {
+ error("unknown certificate key type");
+ goto out;
+@@ -1237,11 +1237,11 @@ pkcs11_fetch_keys(struct pkcs11_provider *p, CK_ULONG slotidx,
+ case CKK_RSA:
+ key = pkcs11_fetch_rsa_pubkey(p, slotidx, &obj);
+ break;
+-#ifdef HAVE_EC_KEY_METHOD_NEW
++#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+ case CKK_ECDSA:
+ key = pkcs11_fetch_ecdsa_pubkey(p, slotidx, &obj);
+ break;
+-#endif /* HAVE_EC_KEY_METHOD_NEW */
++#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+ default:
+ /* XXX print key type? */
+ key = NULL;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-02.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-02.patch
new file mode 100644
index 0000000000..25ba921869
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-02.patch
@@ -0,0 +1,581 @@
+From 92cebfbcc221c9ef3f6bbb78da3d7699c0ae56be Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 14:03:45 +0000
+Subject: [PATCH 02/12] upstream: Separate ssh-pkcs11-helpers for each p11
+ module
+
+Make ssh-pkcs11-client start an independent helper for each provider,
+providing better isolation between modules and reliability if a single
+module misbehaves.
+
+This also implements reference counting of PKCS#11-hosted keys,
+allowing ssh-pkcs11-helper subprocesses to be automatically reaped
+when no remaining keys reference them. This fixes some bugs we have
+that make PKCS11 keys unusable after they have been deleted, e.g.
+https://bugzilla.mindrot.org/show_bug.cgi?id=3125
+
+ok markus@
+
+OpenBSD-Commit-ID: 0ce188b14fe271ab0568f4500070d96c5657244e
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/099cdf59ce1e72f55d421c8445bf6321b3004755]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-pkcs11-client.c | 372 +++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 282 insertions(+), 90 deletions(-)
+
+diff --git a/ssh-pkcs11-client.c b/ssh-pkcs11-client.c
+index 41114c7..4f3c6ed 100644
+--- a/ssh-pkcs11-client.c
++++ b/ssh-pkcs11-client.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-pkcs11-client.c,v 1.16 2020/01/25 00:03:36 djm Exp $ */
++/* $OpenBSD: ssh-pkcs11-client.c,v 1.18 2023/07/19 14:03:45 djm Exp $ */
+ /*
+ * Copyright (c) 2010 Markus Friedl. All rights reserved.
+ * Copyright (c) 2014 Pedro Martelletto. All rights reserved.
+@@ -30,12 +30,11 @@
+ #include <string.h>
+ #include <unistd.h>
+ #include <errno.h>
++#include <limits.h>
+
+ #include <openssl/ecdsa.h>
+ #include <openssl/rsa.h>
+
+-#include "openbsd-compat/openssl-compat.h"
+-
+ #include "pathnames.h"
+ #include "xmalloc.h"
+ #include "sshbuf.h"
+@@ -47,18 +46,140 @@
+ #include "ssh-pkcs11.h"
+ #include "ssherr.h"
+
++#include "openbsd-compat/openssl-compat.h"
++
+ /* borrows code from sftp-server and ssh-agent */
+
+-static int fd = -1;
+-static pid_t pid = -1;
++/*
++ * Maintain a list of ssh-pkcs11-helper subprocesses. These may be looked up
++ * by provider path or their unique EC/RSA METHOD pointers.
++ */
++struct helper {
++ char *path;
++ pid_t pid;
++ int fd;
++ RSA_METHOD *rsa_meth;
++ EC_KEY_METHOD *ec_meth;
++ int (*rsa_finish)(RSA *rsa);
++ void (*ec_finish)(EC_KEY *key);
++ size_t nrsa, nec; /* number of active keys of each type */
++};
++static struct helper **helpers;
++static size_t nhelpers;
++
++static struct helper *
++helper_by_provider(const char *path)
++{
++ size_t i;
++
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] == NULL || helpers[i]->path == NULL ||
++ helpers[i]->fd == -1)
++ continue;
++ if (strcmp(helpers[i]->path, path) == 0)
++ return helpers[i];
++ }
++ return NULL;
++}
++
++static struct helper *
++helper_by_rsa(const RSA *rsa)
++{
++ size_t i;
++ const RSA_METHOD *meth;
++
++ if ((meth = RSA_get_method(rsa)) == NULL)
++ return NULL;
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] != NULL && helpers[i]->rsa_meth == meth)
++ return helpers[i];
++ }
++ return NULL;
++
++}
++
++static struct helper *
++helper_by_ec(const EC_KEY *ec)
++{
++ size_t i;
++ const EC_KEY_METHOD *meth;
++
++ if ((meth = EC_KEY_get_method(ec)) == NULL)
++ return NULL;
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] != NULL && helpers[i]->ec_meth == meth)
++ return helpers[i];
++ }
++ return NULL;
++
++}
++
++static void
++helper_free(struct helper *helper)
++{
++ size_t i;
++ int found = 0;
++
++ if (helper == NULL)
++ return;
++ if (helper->path == NULL || helper->ec_meth == NULL ||
++ helper->rsa_meth == NULL)
++ fatal("%s: inconsistent helper", __func__);
++ debug3("%s: free helper for provider %s", __func__ , helper->path);
++ for (i = 0; i < nhelpers; i++) {
++ if (helpers[i] == helper) {
++ if (found)
++ fatal("%s: helper recorded more than once", __func__);
++ found = 1;
++ }
++ else if (found)
++ helpers[i - 1] = helpers[i];
++ }
++ if (found) {
++ helpers = xrecallocarray(helpers, nhelpers,
++ nhelpers - 1, sizeof(*helpers));
++ nhelpers--;
++ }
++ free(helper->path);
++ EC_KEY_METHOD_free(helper->ec_meth);
++ RSA_meth_free(helper->rsa_meth);
++ free(helper);
++}
++
++static void
++helper_terminate(struct helper *helper)
++{
++ if (helper == NULL) {
++ return;
++ } else if (helper->fd == -1) {
++ debug3("%s: already terminated", __func__);
++ } else {
++ debug3("terminating helper for %s; "
++ "remaining %zu RSA %zu ECDSA", __func__,
++ helper->path, helper->nrsa, helper->nec);
++ close(helper->fd);
++ /* XXX waitpid() */
++ helper->fd = -1;
++ helper->pid = -1;
++ }
++ /*
++ * Don't delete the helper entry until there are no remaining keys
++ * that reference it. Otherwise, any signing operation would call
++ * a free'd METHOD pointer and that would be bad.
++ */
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_free(helper);
++}
+
+ static void
+-send_msg(struct sshbuf *m)
++send_msg(int fd, struct sshbuf *m)
+ {
+ u_char buf[4];
+ size_t mlen = sshbuf_len(m);
+ int r;
+
++ if (fd == -1)
++ return;
+ POKE_U32(buf, mlen);
+ if (atomicio(vwrite, fd, buf, 4) != 4 ||
+ atomicio(vwrite, fd, sshbuf_mutable_ptr(m),
+@@ -69,12 +190,15 @@ send_msg(struct sshbuf *m)
+ }
+
+ static int
+-recv_msg(struct sshbuf *m)
++recv_msg(int fd, struct sshbuf *m)
+ {
+ u_int l, len;
+ u_char c, buf[1024];
+ int r;
+
++ sshbuf_reset(m);
++ if (fd == -1)
++ return 0; /* XXX */
+ if ((len = atomicio(read, fd, buf, 4)) != 4) {
+ error("read from helper failed: %u", len);
+ return (0); /* XXX */
+@@ -83,7 +207,6 @@ recv_msg(struct sshbuf *m)
+ if (len > 256 * 1024)
+ fatal("response too long: %u", len);
+ /* read len bytes into m */
+- sshbuf_reset(m);
+ while (len > 0) {
+ l = len;
+ if (l > sizeof(buf))
+@@ -104,14 +227,17 @@ recv_msg(struct sshbuf *m)
+ int
+ pkcs11_init(int interactive)
+ {
+- return (0);
++ return 0;
+ }
+
+ void
+ pkcs11_terminate(void)
+ {
+- if (fd >= 0)
+- close(fd);
++ size_t i;
++
++ debug3("%s: terminating %zu helpers", __func__, nhelpers);
++ for (i = 0; i < nhelpers; i++)
++ helper_terminate(helpers[i]);
+ }
+
+ static int
+@@ -122,7 +248,11 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ u_char *blob = NULL, *signature = NULL;
+ size_t blen, slen = 0;
+ int r, ret = -1;
++ struct helper *helper;
+
++ if ((helper = helper_by_rsa(rsa)) == NULL || helper->fd == -1)
++ fatal("%s: no helper for PKCS11 key", __func__);
++ debug3("%s: signing with PKCS11 provider %s", __func__, helper->path);
+ if (padding != RSA_PKCS1_PADDING)
+ goto fail;
+ key = sshkey_new(KEY_UNSPEC);
+@@ -144,10 +274,10 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ (r = sshbuf_put_string(msg, from, flen)) != 0 ||
+ (r = sshbuf_put_u32(msg, 0)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- if (recv_msg(msg) == SSH2_AGENT_SIGN_RESPONSE) {
++ if (recv_msg(helper->fd, msg) == SSH2_AGENT_SIGN_RESPONSE) {
+ if ((r = sshbuf_get_string(msg, &signature, &slen)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+ if (slen <= (size_t)RSA_size(rsa)) {
+@@ -163,7 +293,26 @@ rsa_encrypt(int flen, const u_char *from, u_char *to, RSA *rsa, int padding)
+ return (ret);
+ }
+
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
++static int
++rsa_finish(RSA *rsa)
++{
++ struct helper *helper;
++
++ if ((helper = helper_by_rsa(rsa)) == NULL)
++ fatal("%s: no helper for PKCS11 key", __func__);
++ debug3("%s: free PKCS11 RSA key for provider %s", __func__, helper->path);
++ if (helper->rsa_finish != NULL)
++ helper->rsa_finish(rsa);
++ if (helper->nrsa == 0)
++ fatal("%s: RSA refcount error", __func__);
++ helper->nrsa--;
++ debug3("%s: provider %s remaining keys: %zu RSA %zu ECDSA", __func__,
++ helper->path, helper->nrsa, helper->nec);
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_terminate(helper);
++ return 1;
++}
++
+ static ECDSA_SIG *
+ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ const BIGNUM *rp, EC_KEY *ec)
+@@ -175,7 +324,11 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ u_char *blob = NULL, *signature = NULL;
+ size_t blen, slen = 0;
+ int r, nid;
++ struct helper *helper;
+
++ if ((helper = helper_by_ec(ec)) == NULL || helper->fd == -1)
++ fatal("%s: no helper for PKCS11 key", __func__);
++ debug3("%s: signing with PKCS11 provider %s", __func__, helper->path);
+ nid = sshkey_ecdsa_key_to_nid(ec);
+ if (nid < 0) {
+ error("%s: couldn't get curve nid", __func__);
+@@ -203,10 +356,10 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ (r = sshbuf_put_string(msg, dgst, dgst_len)) != 0 ||
+ (r = sshbuf_put_u32(msg, 0)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- if (recv_msg(msg) == SSH2_AGENT_SIGN_RESPONSE) {
++ if (recv_msg(helper->fd, msg) == SSH2_AGENT_SIGN_RESPONSE) {
+ if ((r = sshbuf_get_string(msg, &signature, &slen)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+ cp = signature;
+@@ -220,75 +373,110 @@ ecdsa_do_sign(const unsigned char *dgst, int dgst_len, const BIGNUM *inv,
+ sshbuf_free(msg);
+ return (ret);
+ }
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+
+-static RSA_METHOD *helper_rsa;
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+-static EC_KEY_METHOD *helper_ecdsa;
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
++static void
++ecdsa_do_finish(EC_KEY *ec)
++{
++ struct helper *helper;
++
++ if ((helper = helper_by_ec(ec)) == NULL)
++ fatal("%s: no helper for PKCS11 key", __func__);
++ debug3("%s: free PKCS11 ECDSA key for provider %s", __func__, helper->path);
++ if (helper->ec_finish != NULL)
++ helper->ec_finish(ec);
++ if (helper->nec == 0)
++ fatal("%s: ECDSA refcount error", __func__);
++ helper->nec--;
++ debug3("%s: provider %s remaining keys: %zu RSA %zu ECDSA", __func__,
++ helper->path, helper->nrsa, helper->nec);
++ if (helper->nrsa == 0 && helper->nec == 0)
++ helper_terminate(helper);
++}
+
+ /* redirect private key crypto operations to the ssh-pkcs11-helper */
+ static void
+-wrap_key(struct sshkey *k)
++wrap_key(struct helper *helper, struct sshkey *k)
+ {
+- if (k->type == KEY_RSA)
+- RSA_set_method(k->rsa, helper_rsa);
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+- else if (k->type == KEY_ECDSA)
+- EC_KEY_set_method(k->ecdsa, helper_ecdsa);
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+- else
++ debug3("%s: wrap %s for provider %s", __func__, sshkey_type(k), helper->path);
++ if (k->type == KEY_RSA) {
++ RSA_set_method(k->rsa, helper->rsa_meth);
++ if (helper->nrsa++ >= INT_MAX)
++ fatal("%s: RSA refcount error", __func__);
++ } else if (k->type == KEY_ECDSA) {
++ EC_KEY_set_method(k->ecdsa, helper->ec_meth);
++ if (helper->nec++ >= INT_MAX)
++ fatal("%s: EC refcount error", __func__);
++ } else
+ fatal("%s: unknown key type", __func__);
++ k->flags |= SSHKEY_FLAG_EXT;
++ debug3("%s: provider %s remaining keys: %zu RSA %zu ECDSA", __func__,
++ helper->path, helper->nrsa, helper->nec);
+ }
+
+ static int
+-pkcs11_start_helper_methods(void)
++pkcs11_start_helper_methods(struct helper *helper)
+ {
+- if (helper_rsa != NULL)
+- return (0);
+-
+-#if defined(OPENSSL_HAS_ECC) && defined(HAVE_EC_KEY_METHOD_NEW)
+- int (*orig_sign)(int, const unsigned char *, int, unsigned char *,
++ int (*ec_init)(EC_KEY *key);
++ int (*ec_copy)(EC_KEY *dest, const EC_KEY *src);
++ int (*ec_set_group)(EC_KEY *key, const EC_GROUP *grp);
++ int (*ec_set_private)(EC_KEY *key, const BIGNUM *priv_key);
++ int (*ec_set_public)(EC_KEY *key, const EC_POINT *pub_key);
++ int (*ec_sign)(int, const unsigned char *, int, unsigned char *,
+ unsigned int *, const BIGNUM *, const BIGNUM *, EC_KEY *) = NULL;
+- if (helper_ecdsa != NULL)
+- return (0);
+- helper_ecdsa = EC_KEY_METHOD_new(EC_KEY_OpenSSL());
+- if (helper_ecdsa == NULL)
+- return (-1);
+- EC_KEY_METHOD_get_sign(helper_ecdsa, &orig_sign, NULL, NULL);
+- EC_KEY_METHOD_set_sign(helper_ecdsa, orig_sign, NULL, ecdsa_do_sign);
+-#endif /* OPENSSL_HAS_ECC && HAVE_EC_KEY_METHOD_NEW */
+-
+- if ((helper_rsa = RSA_meth_dup(RSA_get_default_method())) == NULL)
++ RSA_METHOD *rsa_meth;
++ EC_KEY_METHOD *ec_meth;
++
++ if ((ec_meth = EC_KEY_METHOD_new(EC_KEY_OpenSSL())) == NULL)
++ return -1;
++ EC_KEY_METHOD_get_sign(ec_meth, &ec_sign, NULL, NULL);
++ EC_KEY_METHOD_set_sign(ec_meth, ec_sign, NULL, ecdsa_do_sign);
++ EC_KEY_METHOD_get_init(ec_meth, &ec_init, &helper->ec_finish,
++ &ec_copy, &ec_set_group, &ec_set_private, &ec_set_public);
++ EC_KEY_METHOD_set_init(ec_meth, ec_init, ecdsa_do_finish,
++ ec_copy, ec_set_group, ec_set_private, ec_set_public);
++
++ if ((rsa_meth = RSA_meth_dup(RSA_get_default_method())) == NULL)
+ fatal("%s: RSA_meth_dup failed", __func__);
+- if (!RSA_meth_set1_name(helper_rsa, "ssh-pkcs11-helper") ||
+- !RSA_meth_set_priv_enc(helper_rsa, rsa_encrypt))
++ helper->rsa_finish = RSA_meth_get_finish(rsa_meth);
++ if (!RSA_meth_set1_name(rsa_meth, "ssh-pkcs11-helper") ||
++ !RSA_meth_set_priv_enc(rsa_meth, rsa_encrypt) ||
++ !RSA_meth_set_finish(rsa_meth, rsa_finish))
+ fatal("%s: failed to prepare method", __func__);
+
+- return (0);
++ helper->ec_meth = ec_meth;
++ helper->rsa_meth = rsa_meth;
++ return 0;
+ }
+
+-static int
+-pkcs11_start_helper(void)
++static struct helper *
++pkcs11_start_helper(const char *path)
+ {
+ int pair[2];
+- char *helper, *verbosity = NULL;
+-
+- if (log_level_get() >= SYSLOG_LEVEL_DEBUG1)
+- verbosity = "-vvv";
+-
+- if (pkcs11_start_helper_methods() == -1) {
+- error("pkcs11_start_helper_methods failed");
+- return (-1);
+- }
++ char *prog, *verbosity = NULL;
++ struct helper *helper;
++ pid_t pid;
+
++ if (nhelpers >= INT_MAX)
++ fatal("%s: too many helpers", __func__);
++ debug3("%s: start helper for %s", __func__, path);
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+ error("socketpair: %s", strerror(errno));
+- return (-1);
++ return NULL;
++ }
++ helper = xcalloc(1, sizeof(*helper));
++ if (pkcs11_start_helper_methods(helper) == -1) {
++ error("pkcs11_start_helper_methods failed");
++ goto fail;
+ }
+ if ((pid = fork()) == -1) {
+ error("fork: %s", strerror(errno));
+- return (-1);
++ fail:
++ close(pair[0]);
++ close(pair[1]);
++ RSA_meth_free(helper->rsa_meth);
++ EC_KEY_METHOD_free(helper->ec_meth);
++ free(helper);
++ return NULL;
+ } else if (pid == 0) {
+ if ((dup2(pair[1], STDIN_FILENO) == -1) ||
+ (dup2(pair[1], STDOUT_FILENO) == -1)) {
+@@ -297,18 +485,27 @@ pkcs11_start_helper(void)
+ }
+ close(pair[0]);
+ close(pair[1]);
+- helper = getenv("SSH_PKCS11_HELPER");
+- if (helper == NULL || strlen(helper) == 0)
+- helper = _PATH_SSH_PKCS11_HELPER;
++ prog = getenv("SSH_PKCS11_HELPER");
++ if (prog == NULL || strlen(prog) == 0)
++ prog = _PATH_SSH_PKCS11_HELPER;
++ if (log_level_get() >= SYSLOG_LEVEL_DEBUG1)
++ verbosity = "-vvv";
+ debug("%s: starting %s %s", __func__, helper,
+ verbosity == NULL ? "" : verbosity);
+- execlp(helper, helper, verbosity, (char *)NULL);
+- fprintf(stderr, "exec: %s: %s\n", helper, strerror(errno));
++ execlp(prog, prog, verbosity, (char *)NULL);
++ fprintf(stderr, "exec: %s: %s\n", prog, strerror(errno));
+ _exit(1);
+ }
+ close(pair[1]);
+- fd = pair[0];
+- return (0);
++ helper->fd = pair[0];
++ helper->path = xstrdup(path);
++ helper->pid = pid;
++ debug3("%s: helper %zu for \"%s\" on fd %d pid %ld", __func__, nhelpers,
++ helper->path, helper->fd, (long)helper->pid);
++ helpers = xrecallocarray(helpers, nhelpers,
++ nhelpers + 1, sizeof(*helpers));
++ helpers[nhelpers++] = helper;
++ return helper;
+ }
+
+ int
+@@ -322,9 +519,11 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ size_t blen;
+ u_int nkeys, i;
+ struct sshbuf *msg;
++ struct helper *helper;
+
+- if (fd < 0 && pkcs11_start_helper() < 0)
+- return (-1);
++ if ((helper = helper_by_provider(name)) == NULL &&
++ (helper = pkcs11_start_helper(name)) == NULL)
++ return -1;
+
+ if ((msg = sshbuf_new()) == NULL)
+ fatal("%s: sshbuf_new failed", __func__);
+@@ -332,10 +531,10 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ (r = sshbuf_put_cstring(msg, name)) != 0 ||
+ (r = sshbuf_put_cstring(msg, pin)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+- send_msg(msg);
++ send_msg(helper->fd, msg);
+ sshbuf_reset(msg);
+
+- type = recv_msg(msg);
++ type = recv_msg(helper->fd, msg);
+ if (type == SSH2_AGENT_IDENTITIES_ANSWER) {
+ if ((r = sshbuf_get_u32(msg, &nkeys)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+@@ -350,7 +549,7 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ __func__, ssh_err(r));
+ if ((r = sshkey_from_blob(blob, blen, &k)) != 0)
+ fatal("%s: bad key: %s", __func__, ssh_err(r));
+- wrap_key(k);
++ wrap_key(helper, k);
+ (*keysp)[i] = k;
+ if (labelsp)
+ (*labelsp)[i] = label;
+@@ -371,22 +570,15 @@ pkcs11_add_provider(char *name, char *pin, struct sshkey ***keysp,
+ int
+ pkcs11_del_provider(char *name)
+ {
+- int r, ret = -1;
+- struct sshbuf *msg;
+-
+- if ((msg = sshbuf_new()) == NULL)
+- fatal("%s: sshbuf_new failed", __func__);
+- if ((r = sshbuf_put_u8(msg, SSH_AGENTC_REMOVE_SMARTCARD_KEY)) != 0 ||
+- (r = sshbuf_put_cstring(msg, name)) != 0 ||
+- (r = sshbuf_put_cstring(msg, "")) != 0)
+- fatal("%s: buffer error: %s", __func__, ssh_err(r));
+- send_msg(msg);
+- sshbuf_reset(msg);
+-
+- if (recv_msg(msg) == SSH_AGENT_SUCCESS)
+- ret = 0;
+- sshbuf_free(msg);
+- return (ret);
++ struct helper *helper;
++
++ /*
++ * ssh-agent deletes keys before calling this, so the helper entry
++ * should be gone before we get here.
++ */
++ debug3("%s: delete %s", __func__, name);
++ if ((helper = helper_by_provider(name)) != NULL)
++ helper_terminate(helper);
++ return 0;
+ }
+-
+ #endif /* ENABLE_PKCS11 */
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-03.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-03.patch
new file mode 100644
index 0000000000..e16e5e245e
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-03.patch
@@ -0,0 +1,171 @@
+From 2f1be98e83feb90665b9292eff8bb734537fd491 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 14:02:27 +0000
+Subject: [PATCH 03/12] upstream: Ensure FIDO/PKCS11 libraries contain expected
+ symbols
+
+This checks via nlist(3) that candidate provider libraries contain one
+of the symbols that we will require prior to dlopen(), which can cause
+a number of side effects, including execution of constructors.
+
+Feedback deraadt; ok markus
+
+OpenBSD-Commit-ID: 1508a5fbd74e329e69a55b56c453c292029aefbe
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/29ef8a04866ca14688d5b7fed7b8b9deab851f77]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ misc.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ misc.h | 1 +
+ ssh-pkcs11.c | 4 +++
+ ssh-sk.c | 6 ++--
+ 4 files changed, 86 insertions(+), 2 deletions(-)
+
+diff --git a/misc.c b/misc.c
+index 3a31d5c..8a107e4 100644
+--- a/misc.c
++++ b/misc.c
+@@ -28,6 +28,7 @@
+
+ #include <sys/types.h>
+ #include <sys/ioctl.h>
++#include <sys/mman.h>
+ #include <sys/socket.h>
+ #include <sys/stat.h>
+ #include <sys/time.h>
+@@ -41,6 +42,9 @@
+ #ifdef HAVE_POLL_H
+ #include <poll.h>
+ #endif
++#ifdef HAVE_NLIST_H
++#include <nlist.h>
++#endif
+ #include <signal.h>
+ #include <stdarg.h>
+ #include <stdio.h>
+@@ -2266,3 +2270,76 @@ ssh_signal(int signum, sshsig_t handler)
+ }
+ return osa.sa_handler;
+ }
++
++
++/*
++ * Returns zero if the library at 'path' contains symbol 's', nonzero
++ * otherwise.
++ */
++int
++lib_contains_symbol(const char *path, const char *s)
++{
++#ifdef HAVE_NLIST_H
++ struct nlist nl[2];
++ int ret = -1, r;
++
++ memset(nl, 0, sizeof(nl));
++ nl[0].n_name = xstrdup(s);
++ nl[1].n_name = NULL;
++ if ((r = nlist(path, nl)) == -1) {
++ error("%s: nlist failed for %s", __func__, path);
++ goto out;
++ }
++ if (r != 0 || nl[0].n_value == 0 || nl[0].n_type == 0) {
++ error("%s: library %s does not contain symbol %s", __func__, path, s);
++ goto out;
++ }
++ /* success */
++ ret = 0;
++ out:
++ free(nl[0].n_name);
++ return ret;
++#else /* HAVE_NLIST_H */
++ int fd, ret = -1;
++ struct stat st;
++ void *m = NULL;
++ size_t sz = 0;
++
++ memset(&st, 0, sizeof(st));
++ if ((fd = open(path, O_RDONLY)) < 0) {
++ error("%s: open %s: %s", __func__, path, strerror(errno));
++ return -1;
++ }
++ if (fstat(fd, &st) != 0) {
++ error("%s: fstat %s: %s", __func__, path, strerror(errno));
++ goto out;
++ }
++ if (!S_ISREG(st.st_mode)) {
++ error("%s: %s is not a regular file", __func__, path);
++ goto out;
++ }
++ if (st.st_size < 0 ||
++ (size_t)st.st_size < strlen(s) ||
++ st.st_size >= INT_MAX/2) {
++ error("%s: %s bad size %lld", __func__, path, (long long)st.st_size);
++ goto out;
++ }
++ sz = (size_t)st.st_size;
++ if ((m = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, fd, 0)) == MAP_FAILED ||
++ m == NULL) {
++ error("%s: mmap %s: %s", __func__, path, strerror(errno));
++ goto out;
++ }
++ if (memmem(m, sz, s, strlen(s)) == NULL) {
++ error("%s: %s does not contain expected string %s", __func__, path, s);
++ goto out;
++ }
++ /* success */
++ ret = 0;
++ out:
++ if (m != NULL && m != MAP_FAILED)
++ munmap(m, sz);
++ close(fd);
++ return ret;
++#endif /* HAVE_NLIST_H */
++}
+diff --git a/misc.h b/misc.h
+index 4a05db2..3f9f4db 100644
+--- a/misc.h
++++ b/misc.h
+@@ -86,6 +86,7 @@ const char *atoi_err(const char *, int *);
+ int parse_absolute_time(const char *, uint64_t *);
+ void format_absolute_time(uint64_t, char *, size_t);
+ int path_absolute(const char *);
++int lib_contains_symbol(const char *, const char *);
+
+ void sock_set_v6only(int);
+
+diff --git a/ssh-pkcs11.c b/ssh-pkcs11.c
+index b56a41b..639a6f7 100644
+--- a/ssh-pkcs11.c
++++ b/ssh-pkcs11.c
+@@ -1499,6 +1499,10 @@ pkcs11_register_provider(char *provider_id, char *pin,
+ __func__, provider_id);
+ goto fail;
+ }
++ if (lib_contains_symbol(provider_id, "C_GetFunctionList") != 0) {
++ error("provider %s is not a PKCS11 library", provider_id);
++ goto fail;
++ }
+ /* open shared pkcs11-library */
+ if ((handle = dlopen(provider_id, RTLD_NOW)) == NULL) {
+ error("dlopen %s failed: %s", provider_id, dlerror());
+diff --git a/ssh-sk.c b/ssh-sk.c
+index 5ff9381..9df12cc 100644
+--- a/ssh-sk.c
++++ b/ssh-sk.c
+@@ -119,10 +119,12 @@ sshsk_open(const char *path)
+ #endif
+ return ret;
+ }
+- if ((ret->dlhandle = dlopen(path, RTLD_NOW)) == NULL) {
+- error("Provider \"%s\" dlopen failed: %s", path, dlerror());
++ if (lib_contains_symbol(path, "sk_api_version") != 0) {
++ error("provider %s is not an OpenSSH FIDO library", path);
+ goto fail;
+ }
++ if ((ret->dlhandle = dlopen(path, RTLD_NOW)) == NULL)
++ fatal("Provider \"%s\" dlopen failed: %s", path, dlerror());
+ if ((ret->sk_api_version = dlsym(ret->dlhandle,
+ "sk_api_version")) == NULL) {
+ error("Provider \"%s\" dlsym(sk_api_version) failed: %s",
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-04.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-04.patch
new file mode 100644
index 0000000000..5e8040c9bf
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-04.patch
@@ -0,0 +1,34 @@
+From 0862f338941bfdfb2cadee87de6d5fdca1b8f457 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 13:55:53 +0000
+Subject: [PATCH 04/12] upstream: terminate process if requested to load a
+ PKCS#11 provider that isn't a PKCS#11 provider; from / ok markus@
+
+OpenBSD-Commit-ID: 39532cf18b115881bb4cfaee32084497aadfa05c
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/892506b13654301f69f9545f48213fc210e5c5cc]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-pkcs11.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/ssh-pkcs11.c b/ssh-pkcs11.c
+index 639a6f7..7530acc 100644
+--- a/ssh-pkcs11.c
++++ b/ssh-pkcs11.c
+@@ -1508,10 +1508,8 @@ pkcs11_register_provider(char *provider_id, char *pin,
+ error("dlopen %s failed: %s", provider_id, dlerror());
+ goto fail;
+ }
+- if ((getfunctionlist = dlsym(handle, "C_GetFunctionList")) == NULL) {
+- error("dlsym(C_GetFunctionList) failed: %s", dlerror());
+- goto fail;
+- }
++ if ((getfunctionlist = dlsym(handle, "C_GetFunctionList")) == NULL)
++ fatal("dlsym(C_GetFunctionList) failed: %s", dlerror());
+ p = xcalloc(1, sizeof(*p));
+ p->name = xstrdup(provider_id);
+ p->handle = handle;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-05.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-05.patch
new file mode 100644
index 0000000000..0ddbdc68d4
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-05.patch
@@ -0,0 +1,194 @@
+From a6cee3905edf070c0de135d3f2ee5b74da1dbd28 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Tue, 26 May 2020 01:26:58 +0000
+Subject: [PATCH 05/12] upstream: Restrict ssh-agent from signing web
+ challenges for FIDO
+
+keys.
+
+When signing messages in ssh-agent using a FIDO key that has an
+application string that does not start with "ssh:", ensure that the
+message being signed is one of the forms expected for the SSH protocol
+(currently pubkey authentication and sshsig signatures).
+
+This prevents ssh-agent forwarding on a host that has FIDO keys
+attached granting the ability for the remote side to sign challenges
+for web authentication using those keys too.
+
+Note that the converse case of web browsers signing SSH challenges is
+already precluded because no web RP can have the "ssh:" prefix in the
+application string that we require.
+
+ok markus@
+
+OpenBSD-Commit-ID: 9ab6012574ed0352d2f097d307f4a988222d1b19
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/0c111eb84efba7c2a38b2cc3278901a0123161b9]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 100 insertions(+), 10 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index ceb348c..1794f35 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.255 2020/02/06 22:30:54 naddy Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.258 2020/05/26 01:26:58 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -77,6 +77,7 @@
+
+ #include "xmalloc.h"
+ #include "ssh.h"
++#include "ssh2.h"
+ #include "sshbuf.h"
+ #include "sshkey.h"
+ #include "authfd.h"
+@@ -167,6 +168,9 @@ static long lifetime = 0;
+
+ static int fingerprint_hash = SSH_FP_HASH_DEFAULT;
+
++/* Refuse signing of non-SSH messages for web-origin FIDO keys */
++static int restrict_websafe = 1;
++
+ static void
+ close_socket(SocketEntry *e)
+ {
+@@ -282,6 +286,80 @@ agent_decode_alg(struct sshkey *key, u_int flags)
+ return NULL;
+ }
+
++/*
++ * This function inspects a message to be signed by a FIDO key that has a
++ * web-like application string (i.e. one that does not begin with "ssh:".
++ * It checks that the message is one of those expected for SSH operations
++ * (pubkey userauth, sshsig, CA key signing) to exclude signing challenges
++ * for the web.
++ */
++static int
++check_websafe_message_contents(struct sshkey *key,
++ const u_char *msg, size_t len)
++{
++ int matched = 0;
++ struct sshbuf *b;
++ u_char m, n;
++ char *cp1 = NULL, *cp2 = NULL;
++ int r;
++ struct sshkey *mkey = NULL;
++
++ if ((b = sshbuf_from(msg, len)) == NULL)
++ fatal("%s: sshbuf_new", __func__);
++
++ /* SSH userauth request */
++ if ((r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* sess_id */
++ (r = sshbuf_get_u8(b, &m)) == 0 && /* SSH2_MSG_USERAUTH_REQUEST */
++ (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* server user */
++ (r = sshbuf_get_cstring(b, &cp1, NULL)) == 0 && /* service */
++ (r = sshbuf_get_cstring(b, &cp2, NULL)) == 0 && /* method */
++ (r = sshbuf_get_u8(b, &n)) == 0 && /* sig-follows */
++ (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* alg */
++ (r = sshkey_froms(b, &mkey)) == 0 && /* key */
++ sshbuf_len(b) == 0) {
++ debug("%s: parsed userauth", __func__);
++ if (m == SSH2_MSG_USERAUTH_REQUEST && n == 1 &&
++ strcmp(cp1, "ssh-connection") == 0 &&
++ strcmp(cp2, "publickey") == 0 &&
++ sshkey_equal(key, mkey)) {
++ debug("%s: well formed userauth", __func__);
++ matched = 1;
++ }
++ }
++ free(cp1);
++ free(cp2);
++ sshkey_free(mkey);
++ sshbuf_free(b);
++ if (matched)
++ return 1;
++
++ if ((b = sshbuf_from(msg, len)) == NULL)
++ fatal("%s: sshbuf_new", __func__);
++ cp1 = cp2 = NULL;
++ mkey = NULL;
++
++ /* SSHSIG */
++ if ((r = sshbuf_cmp(b, 0, "SSHSIG", 6)) == 0 &&
++ (r = sshbuf_consume(b, 6)) == 0 &&
++ (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* namespace */
++ (r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* reserved */
++ (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* hashalg */
++ (r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* H(msg) */
++ sshbuf_len(b) == 0) {
++ debug("%s: parsed sshsig", __func__);
++ matched = 1;
++ }
++
++ sshbuf_free(b);
++ if (matched)
++ return 1;
++
++ /* XXX CA signature operation */
++
++ error("web-origin key attempting to sign non-SSH message");
++ return 0;
++}
++
+ /* ssh2 only */
+ static void
+ process_sign_request2(SocketEntry *e)
+@@ -314,14 +392,20 @@ process_sign_request2(SocketEntry *e)
+ verbose("%s: user refused key", __func__);
+ goto send;
+ }
+- if (sshkey_is_sk(id->key) &&
+- (id->key->sk_flags & SSH_SK_USER_PRESENCE_REQD)) {
+- if ((fp = sshkey_fingerprint(key, SSH_FP_HASH_DEFAULT,
+- SSH_FP_DEFAULT)) == NULL)
+- fatal("%s: fingerprint failed", __func__);
+- notifier = notify_start(0,
+- "Confirm user presence for key %s %s",
+- sshkey_type(id->key), fp);
++ if (sshkey_is_sk(id->key)) {
++ if (strncmp(id->key->sk_application, "ssh:", 4) != 0 &&
++ !check_websafe_message_contents(key, data, dlen)) {
++ /* error already logged */
++ goto send;
++ }
++ if ((id->key->sk_flags & SSH_SK_USER_PRESENCE_REQD)) {
++ if ((fp = sshkey_fingerprint(key, SSH_FP_HASH_DEFAULT,
++ SSH_FP_DEFAULT)) == NULL)
++ fatal("%s: fingerprint failed", __func__);
++ notifier = notify_start(0,
++ "Confirm user presence for key %s %s",
++ sshkey_type(id->key), fp);
++ }
+ }
+ if ((r = sshkey_sign(id->key, &signature, &slen,
+ data, dlen, agent_decode_alg(key, flags),
+@@ -1214,7 +1298,7 @@ main(int ac, char **av)
+ __progname = ssh_get_progname(av[0]);
+ seed_rng();
+
+- while ((ch = getopt(ac, av, "cDdksE:a:P:t:")) != -1) {
++ while ((ch = getopt(ac, av, "cDdksE:a:O:P:t:")) != -1) {
+ switch (ch) {
+ case 'E':
+ fingerprint_hash = ssh_digest_alg_by_name(optarg);
+@@ -1229,6 +1313,12 @@ main(int ac, char **av)
+ case 'k':
+ k_flag++;
+ break;
++ case 'O':
++ if (strcmp(optarg, "no-restrict-websafe") == 0)
++ restrict_websafe = 0;
++ else
++ fatal("Unknown -O option");
++ break;
+ case 'P':
+ if (provider_whitelist != NULL)
+ fatal("-P option already specified");
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-06.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-06.patch
new file mode 100644
index 0000000000..ac494aab0b
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-06.patch
@@ -0,0 +1,73 @@
+From a5d845b7b42861d18f43e83de9f24c7374d1b458 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 18 Sep 2020 08:16:38 +0000
+Subject: [PATCH 06/12] upstream: handle multiple messages in a single read()
+
+PR#183 by Dennis Kaarsemaker; feedback and ok markus@
+
+OpenBSD-Commit-ID: 8570bb4d02d00cf70b98590716ea6a7d1cce68d1
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/52a03e9fca2d74eef953ddd4709250f365ca3975]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 1794f35..78f7268 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.258 2020/05/26 01:26:58 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.264 2020/09/18 08:16:38 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -853,8 +853,10 @@ send:
+ }
+ #endif /* ENABLE_PKCS11 */
+
+-/* dispatch incoming messages */
+-
++/*
++ * dispatch incoming message.
++ * returns 1 on success, 0 for incomplete messages or -1 on error.
++ */
+ static int
+ process_message(u_int socknum)
+ {
+@@ -908,7 +910,7 @@ process_message(u_int socknum)
+ /* send a fail message for all other request types */
+ send_status(e, 0);
+ }
+- return 0;
++ return 1;
+ }
+
+ switch (type) {
+@@ -952,7 +954,7 @@ process_message(u_int socknum)
+ send_status(e, 0);
+ break;
+ }
+- return 0;
++ return 1;
+ }
+
+ static void
+@@ -1043,7 +1045,12 @@ handle_conn_read(u_int socknum)
+ if ((r = sshbuf_put(sockets[socknum].input, buf, len)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+ explicit_bzero(buf, sizeof(buf));
+- process_message(socknum);
++ for (;;) {
++ if ((r = process_message(socknum)) == -1)
++ return -1;
++ else if (r == 0)
++ break;
++ }
+ return 0;
+ }
+
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-07.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-07.patch
new file mode 100644
index 0000000000..0dcf23ae17
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-07.patch
@@ -0,0 +1,125 @@
+From 653cc18c922fc387b3d3aa1b081c5e5283cce28a Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Tue, 26 Jan 2021 00:47:47 +0000
+Subject: [PATCH 07/12] upstream: use recallocarray to allocate the agent
+ sockets table;
+
+also clear socket entries that are being marked as unused.
+
+spinkle in some debug2() spam to make it easier to watch an agent
+do its thing.
+
+ok markus
+
+OpenBSD-Commit-ID: 74582c8e82e96afea46f6c7b6813a429cbc75922
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/1fe16fd61bb53944ec510882acc0491abd66ff76]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 78f7268..2635bc5 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.264 2020/09/18 08:16:38 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.269 2021/01/26 00:47:47 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -175,11 +175,12 @@ static void
+ close_socket(SocketEntry *e)
+ {
+ close(e->fd);
+- e->fd = -1;
+- e->type = AUTH_UNUSED;
+ sshbuf_free(e->input);
+ sshbuf_free(e->output);
+ sshbuf_free(e->request);
++ memset(e, '\0', sizeof(*e));
++ e->fd = -1;
++ e->type = AUTH_UNUSED;
+ }
+
+ static void
+@@ -249,6 +250,8 @@ process_request_identities(SocketEntry *e)
+ struct sshbuf *msg;
+ int r;
+
++ debug2("%s: entering", __func__);
++
+ if ((msg = sshbuf_new()) == NULL)
+ fatal("%s: sshbuf_new failed", __func__);
+ if ((r = sshbuf_put_u8(msg, SSH2_AGENT_IDENTITIES_ANSWER)) != 0 ||
+@@ -441,6 +444,7 @@ process_remove_identity(SocketEntry *e)
+ struct sshkey *key = NULL;
+ Identity *id;
+
++ debug2("%s: entering", __func__);
+ if ((r = sshkey_froms(e->request, &key)) != 0) {
+ error("%s: get key: %s", __func__, ssh_err(r));
+ goto done;
+@@ -467,6 +471,7 @@ process_remove_all_identities(SocketEntry *e)
+ {
+ Identity *id;
+
++ debug2("%s: entering", __func__);
+ /* Loop over all identities and clear the keys. */
+ for (id = TAILQ_FIRST(&idtab->idlist); id;
+ id = TAILQ_FIRST(&idtab->idlist)) {
+@@ -520,6 +525,7 @@ process_add_identity(SocketEntry *e)
+ u_char ctype;
+ int r = SSH_ERR_INTERNAL_ERROR;
+
++ debug2("%s: entering", __func__);
+ if ((r = sshkey_private_deserialize(e->request, &k)) != 0 ||
+ k == NULL ||
+ (r = sshbuf_get_cstring(e->request, &comment, NULL)) != 0) {
+@@ -667,6 +673,7 @@ process_lock_agent(SocketEntry *e, int lock)
+ static u_int fail_count = 0;
+ size_t pwlen;
+
++ debug2("%s: entering", __func__);
+ /*
+ * This is deliberately fatal: the user has requested that we lock,
+ * but we can't parse their request properly. The only safe thing to
+@@ -738,6 +745,7 @@ process_add_smartcard_key(SocketEntry *e)
+ struct sshkey **keys = NULL, *k;
+ Identity *id;
+
++ debug2("%s: entering", __func__);
+ if ((r = sshbuf_get_cstring(e->request, &provider, NULL)) != 0 ||
+ (r = sshbuf_get_cstring(e->request, &pin, NULL)) != 0) {
+ error("%s: buffer error: %s", __func__, ssh_err(r));
+@@ -818,6 +826,7 @@ process_remove_smartcard_key(SocketEntry *e)
+ int r, success = 0;
+ Identity *id, *nxt;
+
++ debug2("%s: entering", __func__);
+ if ((r = sshbuf_get_cstring(e->request, &provider, NULL)) != 0 ||
+ (r = sshbuf_get_cstring(e->request, &pin, NULL)) != 0) {
+ error("%s: buffer error: %s", __func__, ssh_err(r));
+@@ -962,6 +971,8 @@ new_socket(sock_type type, int fd)
+ {
+ u_int i, old_alloc, new_alloc;
+
++ debug("%s: type = %s", __func__, type == AUTH_CONNECTION ? "CONNECTION" :
++ (type == AUTH_SOCKET ? "SOCKET" : "UNKNOWN"));
+ set_nonblock(fd);
+
+ if (fd > max_fd)
+@@ -981,7 +992,8 @@ new_socket(sock_type type, int fd)
+ }
+ old_alloc = sockets_alloc;
+ new_alloc = sockets_alloc + 10;
+- sockets = xreallocarray(sockets, new_alloc, sizeof(sockets[0]));
++ sockets = xrecallocarray(sockets, old_alloc, new_alloc,
++ sizeof(sockets[0]));
+ for (i = old_alloc; i < new_alloc; i++)
+ sockets[i].type = AUTH_UNUSED;
+ sockets_alloc = new_alloc;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-08.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-08.patch
new file mode 100644
index 0000000000..141c8113bf
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-08.patch
@@ -0,0 +1,315 @@
+From c30158ea225cf8ad67c3dcc88fa9e4afbf8959a7 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Tue, 26 Jan 2021 00:53:31 +0000
+Subject: [PATCH 08/12] upstream: more ssh-agent refactoring
+
+Allow confirm_key() to accept an additional reason suffix
+
+Factor publickey userauth parsing out into its own function and allow
+it to optionally return things it parsed out of the message to its
+caller.
+
+feedback/ok markus@
+
+OpenBSD-Commit-ID: 29006515617d1aa2d8b85cd2bf667e849146477e
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/e0e8bee8024fa9e31974244d14f03d799e5c0775]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 197 ++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 130 insertions(+), 67 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 2635bc5..7ad323c 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.269 2021/01/26 00:47:47 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.270 2021/01/26 00:53:31 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -216,15 +216,16 @@ lookup_identity(struct sshkey *key)
+
+ /* Check confirmation of keysign request */
+ static int
+-confirm_key(Identity *id)
++confirm_key(Identity *id, const char *extra)
+ {
+ char *p;
+ int ret = -1;
+
+ p = sshkey_fingerprint(id->key, fingerprint_hash, SSH_FP_DEFAULT);
+ if (p != NULL &&
+- ask_permission("Allow use of key %s?\nKey fingerprint %s.",
+- id->comment, p))
++ ask_permission("Allow use of key %s?\nKey fingerprint %s.%s%s",
++ id->comment, p,
++ extra == NULL ? "" : "\n", extra == NULL ? "" : extra))
+ ret = 0;
+ free(p);
+
+@@ -290,74 +291,133 @@ agent_decode_alg(struct sshkey *key, u_int flags)
+ }
+
+ /*
+- * This function inspects a message to be signed by a FIDO key that has a
+- * web-like application string (i.e. one that does not begin with "ssh:".
+- * It checks that the message is one of those expected for SSH operations
+- * (pubkey userauth, sshsig, CA key signing) to exclude signing challenges
+- * for the web.
++ * Attempt to parse the contents of a buffer as a SSH publickey userauth
++ * request, checking its contents for consistency and matching the embedded
++ * key against the one that is being used for signing.
++ * Note: does not modify msg buffer.
++ * Optionally extract the username and session ID from the request.
+ */
+ static int
+-check_websafe_message_contents(struct sshkey *key,
+- const u_char *msg, size_t len)
++parse_userauth_request(struct sshbuf *msg, const struct sshkey *expected_key,
++ char **userp, struct sshbuf **sess_idp)
+ {
+- int matched = 0;
+- struct sshbuf *b;
+- u_char m, n;
+- char *cp1 = NULL, *cp2 = NULL;
++ struct sshbuf *b = NULL, *sess_id = NULL;
++ char *user = NULL, *service = NULL, *method = NULL, *pkalg = NULL;
+ int r;
++ u_char t, sig_follows;
+ struct sshkey *mkey = NULL;
+
+- if ((b = sshbuf_from(msg, len)) == NULL)
+- fatal("%s: sshbuf_new", __func__);
++ if (userp != NULL)
++ *userp = NULL;
++ if (sess_idp != NULL)
++ *sess_idp = NULL;
++ if ((b = sshbuf_fromb(msg)) == NULL)
++ fatal("%s: sshbuf_fromb", __func__);
+
+ /* SSH userauth request */
+- if ((r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* sess_id */
+- (r = sshbuf_get_u8(b, &m)) == 0 && /* SSH2_MSG_USERAUTH_REQUEST */
+- (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* server user */
+- (r = sshbuf_get_cstring(b, &cp1, NULL)) == 0 && /* service */
+- (r = sshbuf_get_cstring(b, &cp2, NULL)) == 0 && /* method */
+- (r = sshbuf_get_u8(b, &n)) == 0 && /* sig-follows */
+- (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* alg */
+- (r = sshkey_froms(b, &mkey)) == 0 && /* key */
+- sshbuf_len(b) == 0) {
+- debug("%s: parsed userauth", __func__);
+- if (m == SSH2_MSG_USERAUTH_REQUEST && n == 1 &&
+- strcmp(cp1, "ssh-connection") == 0 &&
+- strcmp(cp2, "publickey") == 0 &&
+- sshkey_equal(key, mkey)) {
+- debug("%s: well formed userauth", __func__);
+- matched = 1;
+- }
++ if ((r = sshbuf_froms(b, &sess_id)) != 0)
++ goto out;
++ if (sshbuf_len(sess_id) == 0) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
+ }
+- free(cp1);
+- free(cp2);
+- sshkey_free(mkey);
++ if ((r = sshbuf_get_u8(b, &t)) != 0 || /* SSH2_MSG_USERAUTH_REQUEST */
++ (r = sshbuf_get_cstring(b, &user, NULL)) != 0 || /* server user */
++ (r = sshbuf_get_cstring(b, &service, NULL)) != 0 || /* service */
++ (r = sshbuf_get_cstring(b, &method, NULL)) != 0 || /* method */
++ (r = sshbuf_get_u8(b, &sig_follows)) != 0 || /* sig-follows */
++ (r = sshbuf_get_cstring(b, &pkalg, NULL)) != 0 || /* alg */
++ (r = sshkey_froms(b, &mkey)) != 0) /* key */
++ goto out;
++ if (t != SSH2_MSG_USERAUTH_REQUEST ||
++ sig_follows != 1 ||
++ strcmp(service, "ssh-connection") != 0 ||
++ !sshkey_equal(expected_key, mkey) ||
++ sshkey_type_from_name(pkalg) != expected_key->type) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
++ }
++ if (strcmp(method, "publickey") != 0) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
++ }
++ if (sshbuf_len(b) != 0) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
++ }
++ /* success */
++ r = 0;
++ debug("%s: well formed userauth", __func__);
++ if (userp != NULL) {
++ *userp = user;
++ user = NULL;
++ }
++ if (sess_idp != NULL) {
++ *sess_idp = sess_id;
++ sess_id = NULL;
++ }
++ out:
+ sshbuf_free(b);
+- if (matched)
+- return 1;
++ sshbuf_free(sess_id);
++ free(user);
++ free(service);
++ free(method);
++ free(pkalg);
++ sshkey_free(mkey);
++ return r;
++}
+
+- if ((b = sshbuf_from(msg, len)) == NULL)
+- fatal("%s: sshbuf_new", __func__);
+- cp1 = cp2 = NULL;
+- mkey = NULL;
+-
+- /* SSHSIG */
+- if ((r = sshbuf_cmp(b, 0, "SSHSIG", 6)) == 0 &&
+- (r = sshbuf_consume(b, 6)) == 0 &&
+- (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* namespace */
+- (r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* reserved */
+- (r = sshbuf_get_cstring(b, NULL, NULL)) == 0 && /* hashalg */
+- (r = sshbuf_get_string_direct(b, NULL, NULL)) == 0 && /* H(msg) */
+- sshbuf_len(b) == 0) {
+- debug("%s: parsed sshsig", __func__);
+- matched = 1;
+- }
++/*
++ * Attempt to parse the contents of a buffer as a SSHSIG signature request.
++ * Note: does not modify buffer.
++ */
++static int
++parse_sshsig_request(struct sshbuf *msg)
++{
++ int r;
++ struct sshbuf *b;
+
++ if ((b = sshbuf_fromb(msg)) == NULL)
++ fatal("%s: sshbuf_fromb", __func__);
++
++ if ((r = sshbuf_cmp(b, 0, "SSHSIG", 6)) != 0 ||
++ (r = sshbuf_consume(b, 6)) != 0 ||
++ (r = sshbuf_get_cstring(b, NULL, NULL)) != 0 || /* namespace */
++ (r = sshbuf_get_string_direct(b, NULL, NULL)) != 0 || /* reserved */
++ (r = sshbuf_get_cstring(b, NULL, NULL)) != 0 || /* hashalg */
++ (r = sshbuf_get_string_direct(b, NULL, NULL)) != 0) /* H(msg) */
++ goto out;
++ if (sshbuf_len(b) != 0) {
++ r = SSH_ERR_INVALID_FORMAT;
++ goto out;
++ }
++ /* success */
++ r = 0;
++ out:
+ sshbuf_free(b);
+- if (matched)
++ return r;
++}
++
++/*
++ * This function inspects a message to be signed by a FIDO key that has a
++ * web-like application string (i.e. one that does not begin with "ssh:".
++ * It checks that the message is one of those expected for SSH operations
++ * (pubkey userauth, sshsig, CA key signing) to exclude signing challenges
++ * for the web.
++ */
++static int
++check_websafe_message_contents(struct sshkey *key, struct sshbuf *data)
++{
++ if (parse_userauth_request(data, key, NULL, NULL) == 0) {
++ debug("%s: signed data matches public key userauth request", __func__);
+ return 1;
++ }
++ if (parse_sshsig_request(data) == 0) {
++ debug("%s: signed data matches SSHSIG signature request", __func__);
++ return 1;
++ }
+
+- /* XXX CA signature operation */
++ /* XXX check CA signature operation */
+
+ error("web-origin key attempting to sign non-SSH message");
+ return 0;
+@@ -367,21 +427,22 @@ check_websafe_message_contents(struct sshkey *key,
+ static void
+ process_sign_request2(SocketEntry *e)
+ {
+- const u_char *data;
+ u_char *signature = NULL;
+- size_t dlen, slen = 0;
++ size_t i, slen = 0;
+ u_int compat = 0, flags;
+ int r, ok = -1;
+ char *fp = NULL;
+- struct sshbuf *msg;
++ struct sshbuf *msg = NULL, *data = NULL;
+ struct sshkey *key = NULL;
+ struct identity *id;
+ struct notifier_ctx *notifier = NULL;
+
+- if ((msg = sshbuf_new()) == NULL)
++ debug("%s: entering", __func__);
++
++ if ((msg = sshbuf_new()) == NULL | (data = sshbuf_new()) == NULL)
+ fatal("%s: sshbuf_new failed", __func__);
+ if ((r = sshkey_froms(e->request, &key)) != 0 ||
+- (r = sshbuf_get_string_direct(e->request, &data, &dlen)) != 0 ||
++ (r = sshbuf_get_stringb(e->request, data)) != 0 ||
+ (r = sshbuf_get_u32(e->request, &flags)) != 0) {
+ error("%s: couldn't parse request: %s", __func__, ssh_err(r));
+ goto send;
+@@ -391,13 +452,13 @@ process_sign_request2(SocketEntry *e)
+ verbose("%s: %s key not found", __func__, sshkey_type(key));
+ goto send;
+ }
+- if (id->confirm && confirm_key(id) != 0) {
++ if (id->confirm && confirm_key(id, NULL) != 0) {
+ verbose("%s: user refused key", __func__);
+ goto send;
+ }
+ if (sshkey_is_sk(id->key)) {
+ if (strncmp(id->key->sk_application, "ssh:", 4) != 0 &&
+- !check_websafe_message_contents(key, data, dlen)) {
++ !check_websafe_message_contents(key, data)) {
+ /* error already logged */
+ goto send;
+ }
+@@ -411,7 +472,7 @@ process_sign_request2(SocketEntry *e)
+ }
+ }
+ if ((r = sshkey_sign(id->key, &signature, &slen,
+- data, dlen, agent_decode_alg(key, flags),
++ sshbuf_ptr(data), sshbuf_len(data), agent_decode_alg(key, flags),
+ id->sk_provider, compat)) != 0) {
+ error("%s: sshkey_sign: %s", __func__, ssh_err(r));
+ goto send;
+@@ -420,8 +481,7 @@ process_sign_request2(SocketEntry *e)
+ ok = 0;
+ send:
+ notify_complete(notifier);
+- sshkey_free(key);
+- free(fp);
++
+ if (ok == 0) {
+ if ((r = sshbuf_put_u8(msg, SSH2_AGENT_SIGN_RESPONSE)) != 0 ||
+ (r = sshbuf_put_string(msg, signature, slen)) != 0)
+@@ -432,7 +492,10 @@ process_sign_request2(SocketEntry *e)
+ if ((r = sshbuf_put_stringb(e->output, msg)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+
++ sshbuf_free(data);
+ sshbuf_free(msg);
++ sshkey_free(key);
++ free(fp);
+ free(signature);
+ }
+
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-09.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-09.patch
new file mode 100644
index 0000000000..b519ccce42
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-09.patch
@@ -0,0 +1,38 @@
+From 7adba46611e5d076d7d12d9f4162dd4cabd5ff50 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 29 Jan 2021 06:28:10 +0000
+Subject: [PATCH 09/12] upstream: give typedef'd struct a struct name; makes
+ the fuzzer I'm
+
+writing a bit easier
+
+OpenBSD-Commit-ID: 1052ab521505a4d8384d67acb3974ef81b8896cb
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/8afaa7d7918419d3da6c0477b83db2159879cb33]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 7ad323c..c99927c 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.270 2021/01/26 00:53:31 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.274 2021/01/29 06:28:10 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -108,7 +108,7 @@ typedef enum {
+ AUTH_CONNECTION
+ } sock_type;
+
+-typedef struct {
++typedef struct socket_entry {
+ int fd;
+ sock_type type;
+ struct sshbuf *input;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-10.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-10.patch
new file mode 100644
index 0000000000..27b2eadfae
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-10.patch
@@ -0,0 +1,39 @@
+From 343e2a2c0ef754a7a86118016b248f7a73f8d510 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Fri, 29 Jan 2021 06:29:46 +0000
+Subject: [PATCH 10/12] upstream: fix the values of enum sock_type
+
+OpenBSD-Commit-ID: 18d048f4dbfbb159ff500cfc2700b8fb1407facd
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/1a4b92758690faa12f49079dd3b72567f909466d]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/ssh-agent.c b/ssh-agent.c
+index c99927c..7f1e14b 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.274 2021/01/29 06:28:10 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.275 2021/01/29 06:29:46 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -103,9 +103,9 @@
+ #define AGENT_RBUF_LEN (4096)
+
+ typedef enum {
+- AUTH_UNUSED,
+- AUTH_SOCKET,
+- AUTH_CONNECTION
++ AUTH_UNUSED = 0,
++ AUTH_SOCKET = 1,
++ AUTH_CONNECTION = 2,
+ } sock_type;
+
+ typedef struct socket_entry {
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-11.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-11.patch
new file mode 100644
index 0000000000..c300393ebf
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-11.patch
@@ -0,0 +1,307 @@
+From 2b3b369c8cf71f9ef5942a5e074e6f86e7ca1e0c Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Sun, 19 Dec 2021 22:09:23 +0000
+Subject: [PATCH 11/12] upstream: ssh-agent side of binding
+
+record session ID/hostkey/forwarding status for each active socket.
+
+Attempt to parse data-to-be-signed at signature request time and extract
+session ID from the blob if it is a pubkey userauth request.
+
+ok markus@
+
+OpenBSD-Commit-ID: a80fd41e292b18b67508362129e9fed549abd318
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/4c1e3ce85e183a9d0c955c88589fed18e4d6a058]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ authfd.h | 3 +
+ ssh-agent.c | 175 +++++++++++++++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 170 insertions(+), 8 deletions(-)
+
+diff --git a/authfd.h b/authfd.h
+index c3bf625..9cc9807 100644
+--- a/authfd.h
++++ b/authfd.h
+@@ -76,6 +76,9 @@ int ssh_agent_sign(int sock, const struct sshkey *key,
+ #define SSH2_AGENTC_ADD_ID_CONSTRAINED 25
+ #define SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED 26
+
++/* generic extension mechanism */
++#define SSH_AGENTC_EXTENSION 27
++
+ #define SSH_AGENT_CONSTRAIN_LIFETIME 1
+ #define SSH_AGENT_CONSTRAIN_CONFIRM 2
+ #define SSH_AGENT_CONSTRAIN_MAXSIGN 3
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 7f1e14b..01c7f2b 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.275 2021/01/29 06:29:46 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.280 2021/12/19 22:09:23 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -98,9 +98,15 @@
+ #endif
+
+ /* Maximum accepted message length */
+-#define AGENT_MAX_LEN (256*1024)
++#define AGENT_MAX_LEN (256*1024)
+ /* Maximum bytes to read from client socket */
+-#define AGENT_RBUF_LEN (4096)
++#define AGENT_RBUF_LEN (4096)
++/* Maximum number of recorded session IDs/hostkeys per connection */
++#define AGENT_MAX_SESSION_IDS 16
++/* Maximum size of session ID */
++#define AGENT_MAX_SID_LEN 128
++
++/* XXX store hostkey_sid in a refcounted tree */
+
+ typedef enum {
+ AUTH_UNUSED = 0,
+@@ -108,12 +114,20 @@ typedef enum {
+ AUTH_CONNECTION = 2,
+ } sock_type;
+
++struct hostkey_sid {
++ struct sshkey *key;
++ struct sshbuf *sid;
++ int forwarded;
++};
++
+ typedef struct socket_entry {
+ int fd;
+ sock_type type;
+ struct sshbuf *input;
+ struct sshbuf *output;
+ struct sshbuf *request;
++ size_t nsession_ids;
++ struct hostkey_sid *session_ids;
+ } SocketEntry;
+
+ u_int sockets_alloc = 0;
+@@ -174,10 +188,17 @@ static int restrict_websafe = 1;
+ static void
+ close_socket(SocketEntry *e)
+ {
++ size_t i;
++
+ close(e->fd);
+ sshbuf_free(e->input);
+ sshbuf_free(e->output);
+ sshbuf_free(e->request);
++ for (i = 0; i < e->nsession_ids; i++) {
++ sshkey_free(e->session_ids[i].key);
++ sshbuf_free(e->session_ids[i].sid);
++ }
++ free(e->session_ids);
+ memset(e, '\0', sizeof(*e));
+ e->fd = -1;
+ e->type = AUTH_UNUSED;
+@@ -423,6 +444,18 @@ check_websafe_message_contents(struct sshkey *key, struct sshbuf *data)
+ return 0;
+ }
+
++static int
++buf_equal(const struct sshbuf *a, const struct sshbuf *b)
++{
++ if (sshbuf_ptr(a) == NULL || sshbuf_ptr(b) == NULL)
++ return SSH_ERR_INVALID_ARGUMENT;
++ if (sshbuf_len(a) != sshbuf_len(b))
++ return SSH_ERR_INVALID_FORMAT;
++ if (timingsafe_bcmp(sshbuf_ptr(a), sshbuf_ptr(b), sshbuf_len(a)) != 0)
++ return SSH_ERR_INVALID_FORMAT;
++ return 0;
++}
++
+ /* ssh2 only */
+ static void
+ process_sign_request2(SocketEntry *e)
+@@ -431,8 +464,8 @@ process_sign_request2(SocketEntry *e)
+ size_t i, slen = 0;
+ u_int compat = 0, flags;
+ int r, ok = -1;
+- char *fp = NULL;
+- struct sshbuf *msg = NULL, *data = NULL;
++ char *fp = NULL, *user = NULL, *sig_dest = NULL;
++ struct sshbuf *msg = NULL, *data = NULL, *sid = NULL;
+ struct sshkey *key = NULL;
+ struct identity *id;
+ struct notifier_ctx *notifier = NULL;
+@@ -452,7 +485,33 @@ process_sign_request2(SocketEntry *e)
+ verbose("%s: %s key not found", __func__, sshkey_type(key));
+ goto send;
+ }
+- if (id->confirm && confirm_key(id, NULL) != 0) {
++ /*
++ * If session IDs were recorded for this socket, then use them to
++ * annotate the confirmation messages with the host keys.
++ */
++ if (e->nsession_ids > 0 &&
++ parse_userauth_request(data, key, &user, &sid) == 0) {
++ /*
++ * session ID from userauth request should match the final
++ * ID in the list recorded in the socket, unless the ssh
++ * client at that point lacks the binding extension (or if
++ * an attacker is trying to steal use of the agent).
++ */
++ i = e->nsession_ids - 1;
++ if (buf_equal(sid, e->session_ids[i].sid) == 0) {
++ if ((fp = sshkey_fingerprint(e->session_ids[i].key,
++ SSH_FP_HASH_DEFAULT, SSH_FP_DEFAULT)) == NULL)
++ fatal("%s: fingerprint failed", __func__);
++ debug3("%s: destination %s %s (slot %zu)", __func__,
++ sshkey_type(e->session_ids[i].key), fp, i);
++ xasprintf(&sig_dest, "public key request for "
++ "target user \"%s\" to %s %s", user,
++ sshkey_type(e->session_ids[i].key), fp);
++ free(fp);
++ fp = NULL;
++ }
++ }
++ if (id->confirm && confirm_key(id, sig_dest) != 0) {
+ verbose("%s: user refused key", __func__);
+ goto send;
+ }
+@@ -467,8 +526,10 @@ process_sign_request2(SocketEntry *e)
+ SSH_FP_DEFAULT)) == NULL)
+ fatal("%s: fingerprint failed", __func__);
+ notifier = notify_start(0,
+- "Confirm user presence for key %s %s",
+- sshkey_type(id->key), fp);
++ "Confirm user presence for key %s %s%s%s",
++ sshkey_type(id->key), fp,
++ sig_dest == NULL ? "" : "\n",
++ sig_dest == NULL ? "" : sig_dest);
+ }
+ }
+ if ((r = sshkey_sign(id->key, &signature, &slen,
+@@ -492,11 +553,14 @@ process_sign_request2(SocketEntry *e)
+ if ((r = sshbuf_put_stringb(e->output, msg)) != 0)
+ fatal("%s: buffer error: %s", __func__, ssh_err(r));
+
++ sshbuf_free(sid);
+ sshbuf_free(data);
+ sshbuf_free(msg);
+ sshkey_free(key);
+ free(fp);
+ free(signature);
++ free(sig_dest);
++ free(user);
+ }
+
+ /* shared */
+@@ -925,6 +989,98 @@ send:
+ }
+ #endif /* ENABLE_PKCS11 */
+
++static int
++process_ext_session_bind(SocketEntry *e)
++{
++ int r, sid_match, key_match;
++ struct sshkey *key = NULL;
++ struct sshbuf *sid = NULL, *sig = NULL;
++ char *fp = NULL;
++ u_char fwd;
++ size_t i;
++
++ debug2("%s: entering", __func__);
++ if ((r = sshkey_froms(e->request, &key)) != 0 ||
++ (r = sshbuf_froms(e->request, &sid)) != 0 ||
++ (r = sshbuf_froms(e->request, &sig)) != 0 ||
++ (r = sshbuf_get_u8(e->request, &fwd)) != 0) {
++ error("%s: parse: %s", __func__, ssh_err(r));
++ goto out;
++ }
++ if ((fp = sshkey_fingerprint(key, SSH_FP_HASH_DEFAULT,
++ SSH_FP_DEFAULT)) == NULL)
++ fatal("%s: fingerprint failed", __func__);
++ /* check signature with hostkey on session ID */
++ if ((r = sshkey_verify(key, sshbuf_ptr(sig), sshbuf_len(sig),
++ sshbuf_ptr(sid), sshbuf_len(sid), NULL, 0, NULL)) != 0) {
++ error("%s: sshkey_verify for %s %s: %s", __func__, sshkey_type(key), fp, ssh_err(r));
++ goto out;
++ }
++ /* check whether sid/key already recorded */
++ for (i = 0; i < e->nsession_ids; i++) {
++ sid_match = buf_equal(sid, e->session_ids[i].sid) == 0;
++ key_match = sshkey_equal(key, e->session_ids[i].key);
++ if (sid_match && key_match) {
++ debug("%s: session ID already recorded for %s %s", __func__,
++ sshkey_type(key), fp);
++ r = 0;
++ goto out;
++ } else if (sid_match) {
++ error("%s: session ID recorded against different key "
++ "for %s %s", __func__, sshkey_type(key), fp);
++ r = -1;
++ goto out;
++ }
++ /*
++ * new sid with previously-seen key can happen, e.g. multiple
++ * connections to the same host.
++ */
++ }
++ /* record new key/sid */
++ if (e->nsession_ids >= AGENT_MAX_SESSION_IDS) {
++ error("%s: too many session IDs recorded", __func__);
++ goto out;
++ }
++ e->session_ids = xrecallocarray(e->session_ids, e->nsession_ids,
++ e->nsession_ids + 1, sizeof(*e->session_ids));
++ i = e->nsession_ids++;
++ debug("%s: recorded %s %s (slot %zu of %d)", __func__, sshkey_type(key), fp, i,
++ AGENT_MAX_SESSION_IDS);
++ e->session_ids[i].key = key;
++ e->session_ids[i].forwarded = fwd != 0;
++ key = NULL; /* transferred */
++ /* can't transfer sid; it's refcounted and scoped to request's life */
++ if ((e->session_ids[i].sid = sshbuf_new()) == NULL)
++ fatal("%s: sshbuf_new", __func__);
++ if ((r = sshbuf_putb(e->session_ids[i].sid, sid)) != 0)
++ fatal("%s: sshbuf_putb session ID: %s", __func__, ssh_err(r));
++ /* success */
++ r = 0;
++ out:
++ sshkey_free(key);
++ sshbuf_free(sid);
++ sshbuf_free(sig);
++ return r == 0 ? 1 : 0;
++}
++
++static void
++process_extension(SocketEntry *e)
++{
++ int r, success = 0;
++ char *name;
++
++ debug2("%s: entering", __func__);
++ if ((r = sshbuf_get_cstring(e->request, &name, NULL)) != 0) {
++ error("%s: parse: %s", __func__, ssh_err(r));
++ goto send;
++ }
++ if (strcmp(name, "session-bind@openssh.com") == 0)
++ success = process_ext_session_bind(e);
++ else
++ debug("%s: unsupported extension \"%s\"", __func__, name);
++send:
++ send_status(e, success);
++}
+ /*
+ * dispatch incoming message.
+ * returns 1 on success, 0 for incomplete messages or -1 on error.
+@@ -1019,6 +1175,9 @@ process_message(u_int socknum)
+ process_remove_smartcard_key(e);
+ break;
+ #endif /* ENABLE_PKCS11 */
++ case SSH_AGENTC_EXTENSION:
++ process_extension(e);
++ break;
+ default:
+ /* Unknown message. Respond with failure. */
+ error("Unknown message %d", type);
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-12.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-12.patch
new file mode 100644
index 0000000000..934775bdec
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-38408-12.patch
@@ -0,0 +1,120 @@
+From 4fe3d0fbd3d6dc1f19354e0d73a3231c461ed044 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Wed, 19 Jul 2023 13:56:33 +0000
+Subject: [PATCH 12/12] upstream: Disallow remote addition of FIDO/PKCS11
+ provider libraries to ssh-agent by default.
+
+The old behaviour of allowing remote clients from loading providers
+can be restored using `ssh-agent -O allow-remote-pkcs11`.
+
+Detection of local/remote clients requires a ssh(1) that supports
+the `session-bind@openssh.com` extension. Forwarding access to a
+ssh-agent socket using non-OpenSSH tools may circumvent this control.
+
+ok markus@
+
+OpenBSD-Commit-ID: 4c2bdf79b214ae7e60cc8c39a45501344fa7bd7c
+
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/1f2731f5d7a8f8a8385c6031667ed29072c0d92a]
+CVE: CVE-2023-38408
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ ssh-agent.1 | 20 ++++++++++++++++++++
+ ssh-agent.c | 26 ++++++++++++++++++++++++--
+ 2 files changed, 44 insertions(+), 2 deletions(-)
+
+diff --git a/ssh-agent.1 b/ssh-agent.1
+index fff0db6..a0f1e21 100644
+--- a/ssh-agent.1
++++ b/ssh-agent.1
+@@ -97,6 +97,26 @@ The default is
+ Kill the current agent (given by the
+ .Ev SSH_AGENT_PID
+ environment variable).
++Currently two options are supported:
++.Cm allow-remote-pkcs11
++and
++.Pp
++The
++.Cm allow-remote-pkcs11
++option allows clients of a forwarded
++.Nm
++to load PKCS#11 or FIDO provider libraries.
++By default only local clients may perform this operation.
++Note that signalling that a
++.Nm
++client remote is performed by
++.Xr ssh 1 ,
++and use of other tools to forward access to the agent socket may circumvent
++this restriction.
++.Pp
++The
++.Cm no-restrict-websafe ,
++instructs
+ .It Fl P Ar provider_whitelist
+ Specify a pattern-list of acceptable paths for PKCS#11 and FIDO authenticator
+ shared libraries that may be used with the
+diff --git a/ssh-agent.c b/ssh-agent.c
+index 01c7f2b..40c1b6b 100644
+--- a/ssh-agent.c
++++ b/ssh-agent.c
+@@ -1,4 +1,4 @@
+-/* $OpenBSD: ssh-agent.c,v 1.280 2021/12/19 22:09:23 djm Exp $ */
++/* $OpenBSD: ssh-agent.c,v 1.300 2023/07/19 13:56:33 djm Exp $ */
+ /*
+ * Author: Tatu Ylonen <ylo@cs.hut.fi>
+ * Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+@@ -167,6 +167,12 @@ char socket_dir[PATH_MAX];
+ /* PKCS#11/Security key path whitelist */
+ static char *provider_whitelist;
+
++/*
++ * Allows PKCS11 providers or SK keys that use non-internal providers to
++ * be added over a remote connection (identified by session-bind@openssh.com).
++ */
++static int remote_add_provider;
++
+ /* locking */
+ #define LOCK_SIZE 32
+ #define LOCK_SALT_SIZE 16
+@@ -736,6 +742,15 @@ process_add_identity(SocketEntry *e)
+ if (strcasecmp(sk_provider, "internal") == 0) {
+ debug("%s: internal provider", __func__);
+ } else {
++ if (e->nsession_ids != 0 && !remote_add_provider) {
++ verbose("failed add of SK provider \"%.100s\": "
++ "remote addition of providers is disabled",
++ sk_provider);
++ free(sk_provider);
++ free(comment);
++ sshkey_free(k);
++ goto send;
++ }
+ if (realpath(sk_provider, canonical_provider) == NULL) {
+ verbose("failed provider \"%.100s\": "
+ "realpath: %s", sk_provider,
+@@ -901,6 +916,11 @@ process_add_smartcard_key(SocketEntry *e)
+ goto send;
+ }
+ }
++ if (e->nsession_ids != 0 && !remote_add_provider) {
++ verbose("failed PKCS#11 add of \"%.100s\": remote addition of "
++ "providers is disabled", provider);
++ goto send;
++ }
+ if (realpath(provider, canonical_provider) == NULL) {
+ verbose("failed PKCS#11 add of \"%.100s\": realpath: %s",
+ provider, strerror(errno));
+@@ -1556,7 +1576,9 @@ main(int ac, char **av)
+ break;
+ case 'O':
+ if (strcmp(optarg, "no-restrict-websafe") == 0)
+- restrict_websafe = 0;
++ restrict_websafe = 0;
++ else if (strcmp(optarg, "allow-remote-pkcs11") == 0)
++ remote_add_provider = 1;
+ else
+ fatal("Unknown -O option");
+ break;
+--
+2.41.0
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch
new file mode 100644
index 0000000000..57c45e3d93
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-48795.patch
@@ -0,0 +1,468 @@
+(modified to not remove ssh_packet_read_expect(), to add to
+KexAlgorithms in sshd.c and sshconnect2.c as this version pre-dates
+kex_proposal_populate_entries(), replace debug*_f() with debug*(),
+error*_f() with error*(), and fatal_f() with fatal())
+
+Backport of:
+
+From 1edb00c58f8a6875fad6a497aa2bacf37f9e6cd5 Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Mon, 18 Dec 2023 14:45:17 +0000
+Subject: [PATCH] upstream: implement "strict key exchange" in ssh and sshd
+
+This adds a protocol extension to improve the integrity of the SSH
+transport protocol, particular in and around the initial key exchange
+(KEX) phase.
+
+Full details of the extension are in the PROTOCOL file.
+
+with markus@
+
+OpenBSD-Commit-ID: 2a66ac962f0a630d7945fee54004ed9e9c439f14
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/openssh/tree/debian/patches/CVE-2023-48795.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/openssh/openssh-portable/commit/1edb00c58f8a6875fad6a497aa2bacf37f9e6cd5]
+CVE: CVE-2023-48795
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ PROTOCOL | 26 +++++++++++++++++
+ kex.c | 68 +++++++++++++++++++++++++++++++++-----------
+ kex.h | 1 +
+ packet.c | 78 ++++++++++++++++++++++++++++++++++++++-------------
+ sshconnect2.c | 14 +++------
+ sshd.c | 7 +++--
+ 6 files changed, 146 insertions(+), 48 deletions(-)
+
+diff --git a/PROTOCOL b/PROTOCOL
+index f75c1c0..89bddfe 100644
+--- a/PROTOCOL
++++ b/PROTOCOL
+@@ -102,6 +102,32 @@ OpenSSH supports the use of ECDH in Curve25519 for key exchange as
+ described at:
+ http://git.libssh.org/users/aris/libssh.git/plain/doc/curve25519-sha256@libssh.org.txt?h=curve25519
+
++1.9 transport: strict key exchange extension
++
++OpenSSH supports a number of transport-layer hardening measures under
++a "strict KEX" feature. This feature is signalled similarly to the
++RFC8308 ext-info feature: by including a additional algorithm in the
++initiial SSH2_MSG_KEXINIT kex_algorithms field. The client may append
++"kex-strict-c-v00@openssh.com" to its kex_algorithms and the server
++may append "kex-strict-s-v00@openssh.com". These pseudo-algorithms
++are only valid in the initial SSH2_MSG_KEXINIT and MUST be ignored
++if they are present in subsequent SSH2_MSG_KEXINIT packets.
++
++When an endpoint that supports this extension observes this algorithm
++name in a peer's KEXINIT packet, it MUST make the following changes to
++the the protocol:
++
++a) During initial KEX, terminate the connection if any unexpected or
++ out-of-sequence packet is received. This includes terminating the
++ connection if the first packet received is not SSH2_MSG_KEXINIT.
++ Unexpected packets for the purpose of strict KEX include messages
++ that are otherwise valid at any time during the connection such as
++ SSH2_MSG_DEBUG and SSH2_MSG_IGNORE.
++b) After sending or receiving a SSH2_MSG_NEWKEYS message, reset the
++ packet sequence number to zero. This behaviour persists for the
++ duration of the connection (i.e. not just the first
++ SSH2_MSG_NEWKEYS).
++
+ 2. Connection protocol changes
+
+ 2.1. connection: Channel write close extension "eow@openssh.com"
+diff --git a/kex.c b/kex.c
+index ce85f04..3129a4e 100644
+--- a/kex.c
++++ b/kex.c
+@@ -63,7 +63,7 @@
+ #include "digest.h"
+
+ /* prototype */
+-static int kex_choose_conf(struct ssh *);
++static int kex_choose_conf(struct ssh *, uint32_t seq);
+ static int kex_input_newkeys(int, u_int32_t, struct ssh *);
+
+ static const char *proposal_names[PROPOSAL_MAX] = {
+@@ -173,6 +173,18 @@ kex_names_valid(const char *names)
+ return 1;
+ }
+
++/* returns non-zero if proposal contains any algorithm from algs */
++static int
++has_any_alg(const char *proposal, const char *algs)
++{
++ char *cp;
++
++ if ((cp = match_list(proposal, algs, NULL)) == NULL)
++ return 0;
++ free(cp);
++ return 1;
++}
++
+ /*
+ * Concatenate algorithm names, avoiding duplicates in the process.
+ * Caller must free returned string.
+@@ -180,7 +192,7 @@ kex_names_valid(const char *names)
+ char *
+ kex_names_cat(const char *a, const char *b)
+ {
+- char *ret = NULL, *tmp = NULL, *cp, *p, *m;
++ char *ret = NULL, *tmp = NULL, *cp, *p;
+ size_t len;
+
+ if (a == NULL || *a == '\0')
+@@ -197,10 +209,8 @@ kex_names_cat(const char *a, const char *b)
+ }
+ strlcpy(ret, a, len);
+ for ((p = strsep(&cp, ",")); p && *p != '\0'; (p = strsep(&cp, ","))) {
+- if ((m = match_list(ret, p, NULL)) != NULL) {
+- free(m);
++ if (has_any_alg(ret, p))
+ continue; /* Algorithm already present */
+- }
+ if (strlcat(ret, ",", len) >= len ||
+ strlcat(ret, p, len) >= len) {
+ free(tmp);
+@@ -409,7 +419,12 @@ kex_protocol_error(int type, u_int32_t seq, struct ssh *ssh)
+ {
+ int r;
+
+- error("kex protocol error: type %d seq %u", type, seq);
++ /* If in strict mode, any unexpected message is an error */
++ if ((ssh->kex->flags & KEX_INITIAL) && ssh->kex->kex_strict) {
++ ssh_packet_disconnect(ssh, "strict KEX violation: "
++ "unexpected packet type %u (seqnr %u)", type, seq);
++ }
++ error("type %u seq %u", type, seq);
+ if ((r = sshpkt_start(ssh, SSH2_MSG_UNIMPLEMENTED)) != 0 ||
+ (r = sshpkt_put_u32(ssh, seq)) != 0 ||
+ (r = sshpkt_send(ssh)) != 0)
+@@ -481,6 +496,11 @@ kex_input_ext_info(int type, u_int32_t seq, struct ssh *ssh)
+ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, &kex_protocol_error);
+ if ((r = sshpkt_get_u32(ssh, &ninfo)) != 0)
+ return r;
++ if (ninfo >= 1024) {
++ error("SSH2_MSG_EXT_INFO with too many entries, expected "
++ "<=1024, received %u", ninfo);
++ return dispatch_protocol_error(type, seq, ssh);
++ }
+ for (i = 0; i < ninfo; i++) {
+ if ((r = sshpkt_get_cstring(ssh, &name, NULL)) != 0)
+ return r;
+@@ -581,7 +601,7 @@ kex_input_kexinit(int type, u_int32_t seq, struct ssh *ssh)
+ error("%s: no hex", __func__);
+ return SSH_ERR_INTERNAL_ERROR;
+ }
+- ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, NULL);
++ ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, &kex_protocol_error);
+ ptr = sshpkt_ptr(ssh, &dlen);
+ if ((r = sshbuf_put(kex->peer, ptr, dlen)) != 0)
+ return r;
+@@ -617,7 +637,7 @@ kex_input_kexinit(int type, u_int32_t seq, struct ssh *ssh)
+ if (!(kex->flags & KEX_INIT_SENT))
+ if ((r = kex_send_kexinit(ssh)) != 0)
+ return r;
+- if ((r = kex_choose_conf(ssh)) != 0)
++ if ((r = kex_choose_conf(ssh, seq)) != 0)
+ return r;
+
+ if (kex->kex_type < KEX_MAX && kex->kex[kex->kex_type] != NULL)
+@@ -880,7 +900,13 @@ proposals_match(char *my[PROPOSAL_MAX], char *peer[PROPOSAL_MAX])
+ }
+
+ static int
+-kex_choose_conf(struct ssh *ssh)
++kexalgs_contains(char **peer, const char *ext)
++{
++ return has_any_alg(peer[PROPOSAL_KEX_ALGS], ext);
++}
++
++static int
++kex_choose_conf(struct ssh *ssh, uint32_t seq)
+ {
+ struct kex *kex = ssh->kex;
+ struct newkeys *newkeys;
+@@ -905,13 +931,23 @@ kex_choose_conf(struct ssh *ssh)
+ sprop=peer;
+ }
+
+- /* Check whether client supports ext_info_c */
+- if (kex->server && (kex->flags & KEX_INITIAL)) {
+- char *ext;
+-
+- ext = match_list("ext-info-c", peer[PROPOSAL_KEX_ALGS], NULL);
+- kex->ext_info_c = (ext != NULL);
+- free(ext);
++ /* Check whether peer supports ext_info/kex_strict */
++ if ((kex->flags & KEX_INITIAL) != 0) {
++ if (kex->server) {
++ kex->ext_info_c = kexalgs_contains(peer, "ext-info-c");
++ kex->kex_strict = kexalgs_contains(peer,
++ "kex-strict-c-v00@openssh.com");
++ } else {
++ kex->kex_strict = kexalgs_contains(peer,
++ "kex-strict-s-v00@openssh.com");
++ }
++ if (kex->kex_strict) {
++ debug3("will use strict KEX ordering");
++ if (seq != 0)
++ ssh_packet_disconnect(ssh,
++ "strict KEX violation: "
++ "KEXINIT was not the first packet");
++ }
+ }
+
+ /* Algorithm Negotiation */
+diff --git a/kex.h b/kex.h
+index a5ae6ac..cae38f7 100644
+--- a/kex.h
++++ b/kex.h
+@@ -145,6 +145,7 @@ struct kex {
+ u_int kex_type;
+ char *server_sig_algs;
+ int ext_info_c;
++ int kex_strict;
+ struct sshbuf *my;
+ struct sshbuf *peer;
+ struct sshbuf *client_version;
+diff --git a/packet.c b/packet.c
+index 6d3e917..43139f9 100644
+--- a/packet.c
++++ b/packet.c
+@@ -1203,8 +1203,13 @@ ssh_packet_send2_wrapped(struct ssh *ssh)
+ sshbuf_dump(state->output, stderr);
+ #endif
+ /* increment sequence number for outgoing packets */
+- if (++state->p_send.seqnr == 0)
++ if (++state->p_send.seqnr == 0) {
++ if ((ssh->kex->flags & KEX_INITIAL) != 0) {
++ ssh_packet_disconnect(ssh, "outgoing sequence number "
++ "wrapped during initial key exchange");
++ }
+ logit("outgoing seqnr wraps around");
++ }
+ if (++state->p_send.packets == 0)
+ if (!(ssh->compat & SSH_BUG_NOREKEY))
+ return SSH_ERR_NEED_REKEY;
+@@ -1212,6 +1217,11 @@ ssh_packet_send2_wrapped(struct ssh *ssh)
+ state->p_send.bytes += len;
+ sshbuf_reset(state->outgoing_packet);
+
++ if (type == SSH2_MSG_NEWKEYS && ssh->kex->kex_strict) {
++ debug("resetting send seqnr %u", state->p_send.seqnr);
++ state->p_send.seqnr = 0;
++ }
++
+ if (type == SSH2_MSG_NEWKEYS)
+ r = ssh_set_newkeys(ssh, MODE_OUT);
+ else if (type == SSH2_MSG_USERAUTH_SUCCESS && state->server_side)
+@@ -1345,8 +1355,7 @@ ssh_packet_read_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ /* Stay in the loop until we have received a complete packet. */
+ for (;;) {
+ /* Try to read a packet from the buffer. */
+- r = ssh_packet_read_poll_seqnr(ssh, typep, seqnr_p);
+- if (r != 0)
++ if ((r = ssh_packet_read_poll_seqnr(ssh, typep, seqnr_p)) != 0)
+ break;
+ /* If we got a packet, return it. */
+ if (*typep != SSH_MSG_NONE)
+@@ -1633,10 +1642,16 @@ ssh_packet_read_poll2(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ if ((r = sshbuf_consume(state->input, mac->mac_len)) != 0)
+ goto out;
+ }
++
+ if (seqnr_p != NULL)
+ *seqnr_p = state->p_read.seqnr;
+- if (++state->p_read.seqnr == 0)
++ if (++state->p_read.seqnr == 0) {
++ if ((ssh->kex->flags & KEX_INITIAL) != 0) {
++ ssh_packet_disconnect(ssh, "incoming sequence number "
++ "wrapped during initial key exchange");
++ }
+ logit("incoming seqnr wraps around");
++ }
+ if (++state->p_read.packets == 0)
+ if (!(ssh->compat & SSH_BUG_NOREKEY))
+ return SSH_ERR_NEED_REKEY;
+@@ -1702,6 +1717,10 @@ ssh_packet_read_poll2(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ #endif
+ /* reset for next packet */
+ state->packlen = 0;
++ if (*typep == SSH2_MSG_NEWKEYS && ssh->kex->kex_strict) {
++ debug("resetting read seqnr %u", state->p_read.seqnr);
++ state->p_read.seqnr = 0;
++ }
+
+ /* do we need to rekey? */
+ if (ssh_packet_need_rekeying(ssh, 0)) {
+@@ -1726,10 +1745,39 @@ ssh_packet_read_poll_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ r = ssh_packet_read_poll2(ssh, typep, seqnr_p);
+ if (r != 0)
+ return r;
+- if (*typep) {
+- state->keep_alive_timeouts = 0;
+- DBG(debug("received packet type %d", *typep));
++ if (*typep == 0) {
++ /* no message ready */
++ return 0;
++ }
++ state->keep_alive_timeouts = 0;
++ DBG(debug("received packet type %d", *typep));
++
++ /* Always process disconnect messages */
++ if (*typep == SSH2_MSG_DISCONNECT) {
++ if ((r = sshpkt_get_u32(ssh, &reason)) != 0 ||
++ (r = sshpkt_get_string(ssh, &msg, NULL)) != 0)
++ return r;
++ /* Ignore normal client exit notifications */
++ do_log2(ssh->state->server_side &&
++ reason == SSH2_DISCONNECT_BY_APPLICATION ?
++ SYSLOG_LEVEL_INFO : SYSLOG_LEVEL_ERROR,
++ "Received disconnect from %s port %d:"
++ "%u: %.400s", ssh_remote_ipaddr(ssh),
++ ssh_remote_port(ssh), reason, msg);
++ free(msg);
++ return SSH_ERR_DISCONNECTED;
+ }
++
++ /*
++ * Do not implicitly handle any messages here during initial
++ * KEX when in strict mode. They will be need to be allowed
++ * explicitly by the KEX dispatch table or they will generate
++ * protocol errors.
++ */
++ if (ssh->kex != NULL &&
++ (ssh->kex->flags & KEX_INITIAL) && ssh->kex->kex_strict)
++ return 0;
++ /* Implicitly handle transport-level messages */
+ switch (*typep) {
+ case SSH2_MSG_IGNORE:
+ debug3("Received SSH2_MSG_IGNORE");
+@@ -1744,19 +1792,6 @@ ssh_packet_read_poll_seqnr(struct ssh *ssh, u_char *typep, u_int32_t *seqnr_p)
+ debug("Remote: %.900s", msg);
+ free(msg);
+ break;
+- case SSH2_MSG_DISCONNECT:
+- if ((r = sshpkt_get_u32(ssh, &reason)) != 0 ||
+- (r = sshpkt_get_string(ssh, &msg, NULL)) != 0)
+- return r;
+- /* Ignore normal client exit notifications */
+- do_log2(ssh->state->server_side &&
+- reason == SSH2_DISCONNECT_BY_APPLICATION ?
+- SYSLOG_LEVEL_INFO : SYSLOG_LEVEL_ERROR,
+- "Received disconnect from %s port %d:"
+- "%u: %.400s", ssh_remote_ipaddr(ssh),
+- ssh_remote_port(ssh), reason, msg);
+- free(msg);
+- return SSH_ERR_DISCONNECTED;
+ case SSH2_MSG_UNIMPLEMENTED:
+ if ((r = sshpkt_get_u32(ssh, &seqnr)) != 0)
+ return r;
+@@ -2235,6 +2270,7 @@ kex_to_blob(struct sshbuf *m, struct kex *kex)
+ (r = sshbuf_put_u32(m, kex->hostkey_type)) != 0 ||
+ (r = sshbuf_put_u32(m, kex->hostkey_nid)) != 0 ||
+ (r = sshbuf_put_u32(m, kex->kex_type)) != 0 ||
++ (r = sshbuf_put_u32(m, kex->kex_strict)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->my)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->peer)) != 0 ||
+ (r = sshbuf_put_stringb(m, kex->client_version)) != 0 ||
+@@ -2397,6 +2433,7 @@ kex_from_blob(struct sshbuf *m, struct kex **kexp)
+ (r = sshbuf_get_u32(m, (u_int *)&kex->hostkey_type)) != 0 ||
+ (r = sshbuf_get_u32(m, (u_int *)&kex->hostkey_nid)) != 0 ||
+ (r = sshbuf_get_u32(m, &kex->kex_type)) != 0 ||
++ (r = sshbuf_get_u32(m, &kex->kex_strict)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->my)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->peer)) != 0 ||
+ (r = sshbuf_get_stringb(m, kex->client_version)) != 0 ||
+@@ -2724,6 +2761,7 @@ sshpkt_disconnect(struct ssh *ssh, const char *fmt,...)
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
++ debug2("sending SSH2_MSG_DISCONNECT: %s", buf);
+ if ((r = sshpkt_start(ssh, SSH2_MSG_DISCONNECT)) != 0 ||
+ (r = sshpkt_put_u32(ssh, SSH2_DISCONNECT_PROTOCOL_ERROR)) != 0 ||
+ (r = sshpkt_put_cstring(ssh, buf)) != 0 ||
+diff --git a/sshconnect2.c b/sshconnect2.c
+index 5df9477..617ed9f 100644
+--- a/sshconnect2.c
++++ b/sshconnect2.c
+@@ -218,7 +218,8 @@ ssh_kex2(struct ssh *ssh, char *host, struct sockaddr *hostaddr, u_short port)
+ fatal("%s: kex_assemble_namelist", __func__);
+ free(all_key);
+
+- if ((s = kex_names_cat(options.kex_algorithms, "ext-info-c")) == NULL)
++ if ((s = kex_names_cat(options.kex_algorithms,
++ "ext-info-c,kex-strict-c-v00@openssh.com")) == NULL)
+ fatal("%s: kex_names_cat", __func__);
+ myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(s);
+ myproposal[PROPOSAL_ENC_ALGS_CTOS] =
+@@ -343,7 +344,6 @@ struct cauthmethod {
+ };
+
+ static int input_userauth_service_accept(int, u_int32_t, struct ssh *);
+-static int input_userauth_ext_info(int, u_int32_t, struct ssh *);
+ static int input_userauth_success(int, u_int32_t, struct ssh *);
+ static int input_userauth_failure(int, u_int32_t, struct ssh *);
+ static int input_userauth_banner(int, u_int32_t, struct ssh *);
+@@ -460,7 +460,7 @@ ssh_userauth2(struct ssh *ssh, const char *local_user,
+
+ ssh->authctxt = &authctxt;
+ ssh_dispatch_init(ssh, &input_userauth_error);
+- ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, &input_userauth_ext_info);
++ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, kex_input_ext_info);
+ ssh_dispatch_set(ssh, SSH2_MSG_SERVICE_ACCEPT, &input_userauth_service_accept);
+ ssh_dispatch_run_fatal(ssh, DISPATCH_BLOCK, &authctxt.success); /* loop until success */
+ pubkey_cleanup(ssh);
+@@ -505,13 +505,6 @@ input_userauth_service_accept(int type, u_int32_t seq, struct ssh *ssh)
+ return r;
+ }
+
+-/* ARGSUSED */
+-static int
+-input_userauth_ext_info(int type, u_int32_t seqnr, struct ssh *ssh)
+-{
+- return kex_input_ext_info(type, seqnr, ssh);
+-}
+-
+ void
+ userauth(struct ssh *ssh, char *authlist)
+ {
+@@ -593,6 +586,7 @@ input_userauth_success(int type, u_int32_t seq, struct ssh *ssh)
+ free(authctxt->methoddata);
+ authctxt->methoddata = NULL;
+ authctxt->success = 1; /* break out */
++ ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, dispatch_protocol_error);
+ return 0;
+ }
+
+diff --git a/sshd.c b/sshd.c
+index 60b2aaf..ffea38c 100644
+--- a/sshd.c
++++ b/sshd.c
+@@ -2323,11 +2323,13 @@ static void
+ do_ssh2_kex(struct ssh *ssh)
+ {
+ char *myproposal[PROPOSAL_MAX] = { KEX_SERVER };
++ char *s;
+ struct kex *kex;
+ int r;
+
+- myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(
+- options.kex_algorithms);
++ if ((s = kex_names_cat(options.kex_algorithms, "kex-strict-s-v00@openssh.com")) == NULL)
++ fatal("kex_names_cat");
++ myproposal[PROPOSAL_KEX_ALGS] = compat_kex_proposal(s);
+ myproposal[PROPOSAL_ENC_ALGS_CTOS] = compat_cipher_proposal(
+ options.ciphers);
+ myproposal[PROPOSAL_ENC_ALGS_STOC] = compat_cipher_proposal(
+@@ -2382,6 +2384,7 @@ do_ssh2_kex(struct ssh *ssh)
+ packet_send();
+ packet_write_wait();
+ #endif
++ free(s);
+ debug("KEX done");
+ }
+
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch b/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch
new file mode 100644
index 0000000000..0ba8c312d0
--- /dev/null
+++ b/meta/recipes-connectivity/openssh/openssh/CVE-2023-51385.patch
@@ -0,0 +1,95 @@
+From 7ef3787c84b6b524501211b11a26c742f829af1a Mon Sep 17 00:00:00 2001
+From: "djm@openbsd.org" <djm@openbsd.org>
+Date: Mon, 18 Dec 2023 14:47:44 +0000
+Subject: [PATCH] upstream: ban user/hostnames with most shell metacharacters
+
+This makes ssh(1) refuse user or host names provided on the
+commandline that contain most shell metacharacters.
+
+Some programs that invoke ssh(1) using untrusted data do not filter
+metacharacters in arguments they supply. This could create
+interactions with user-specified ProxyCommand and other directives
+that allow shell injection attacks to occur.
+
+It's a mistake to invoke ssh(1) with arbitrary untrusted arguments,
+but getting this stuff right can be tricky, so this should prevent
+most obvious ways of creating risky situations. It however is not
+and cannot be perfect: ssh(1) has no practical way of interpreting
+what shell quoting rules are in use and how they interact with the
+user's specified ProxyCommand.
+
+To allow configurations that use strange user or hostnames to
+continue to work, this strictness is applied only to names coming
+from the commandline. Names specified using User or Hostname
+directives in ssh_config(5) are not affected.
+
+feedback/ok millert@ markus@ dtucker@ deraadt@
+
+OpenBSD-Commit-ID: 3b487348b5964f3e77b6b4d3da4c3b439e94b2d9
+
+CVE: CVE-2023-51385
+Upstream-Status: Backport [https://github.com/openssh/openssh-portable/commit/7ef3787c84b6b524501211b11a26c742f829af1a]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: Hunks refreshed to apply cleanly
+
+---
+ ssh.c | 41 ++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 40 insertions(+), 1 deletion(-)
+
+diff --git a/ssh.c b/ssh.c
+index 35c48e62d18..48d93ddf2a9 100644
+--- a/ssh.c
++++ b/ssh.c
+@@ -583,6 +583,41 @@ set_addrinfo_port(struct addrinfo *addrs
+ }
+ }
+
++static int
++valid_hostname(const char *s)
++{
++ size_t i;
++
++ if (*s == '-')
++ return 0;
++ for (i = 0; s[i] != 0; i++) {
++ if (strchr("'`\"$\\;&<>|(){}", s[i]) != NULL ||
++ isspace((u_char)s[i]) || iscntrl((u_char)s[i]))
++ return 0;
++ }
++ return 1;
++}
++
++static int
++valid_ruser(const char *s)
++{
++ size_t i;
++
++ if (*s == '-')
++ return 0;
++ for (i = 0; s[i] != 0; i++) {
++ if (strchr("'`\";&<>|(){}", s[i]) != NULL)
++ return 0;
++ /* Disallow '-' after whitespace */
++ if (isspace((u_char)s[i]) && s[i + 1] == '-')
++ return 0;
++ /* Disallow \ in last position */
++ if (s[i] == '\\' && s[i + 1] == '\0')
++ return 0;
++ }
++ return 1;
++}
++
+ /*
+ * Main program for the ssh client.
+ */
+@@ -1069,6 +1104,10 @@ main(int ac, char **av)
+ if (!host)
+ usage();
+
++ if (!valid_hostname(host))
++ fatal("hostname contains invalid characters");
++ if (options.user != NULL && !valid_ruser(options.user))
++ fatal("remote username contains invalid characters");
+ host_arg = xstrdup(host);
+
+ /* Initialize the command to execute on remote host. */
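With this patch applied, ssh(1) refuses metacharacter-laden names before any connection attempt; the error strings come straight from the fatal() calls in the hunk above. Illustrative invocations (host and user values are made up):

    ssh 'evil`id`host'                  # rejected: hostname contains invalid characters
    ssh -l 'user;id' host.example.com   # rejected: remote username contains invalid characters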
diff --git a/meta/recipes-connectivity/openssh/openssh/sshd.socket b/meta/recipes-connectivity/openssh/openssh/sshd.socket
index 12c39b26b5..8d76d62309 100644
--- a/meta/recipes-connectivity/openssh/openssh/sshd.socket
+++ b/meta/recipes-connectivity/openssh/openssh/sshd.socket
@@ -1,5 +1,6 @@
[Unit]
Conflicts=sshd.service
+Wants=sshdgenkeys.service
[Socket]
ExecStartPre=@BASE_BINDIR@/mkdir -p /var/run/sshd
diff --git a/meta/recipes-connectivity/openssh/openssh/sshd@.service b/meta/recipes-connectivity/openssh/openssh/sshd@.service
index 9d83dfb2bb..422450c7a1 100644
--- a/meta/recipes-connectivity/openssh/openssh/sshd@.service
+++ b/meta/recipes-connectivity/openssh/openssh/sshd@.service
@@ -1,13 +1,11 @@
[Unit]
Description=OpenSSH Per-Connection Daemon
-Wants=sshdgenkeys.service
After=sshdgenkeys.service
[Service]
Environment="SSHD_OPTS="
EnvironmentFile=-/etc/default/ssh
ExecStart=-@SBINDIR@/sshd -i $SSHD_OPTS
-ExecReload=@BASE_BINDIR@/kill -HUP $MAINPID
StandardInput=socket
StandardError=syslog
KillMode=process
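Together, the two unit changes move host-key generation onto the socket unit: sshdgenkeys.service is now pulled in once when sshd.socket starts instead of being wanted by every per-connection sshd@ instance. On a running image this can be sanity-checked with systemctl (unit names follow the files above):

    systemctl list-dependencies sshd.socket | grep sshdgenkeys
    systemctl status sshdgenkeys.service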
diff --git a/meta/recipes-connectivity/openssh/openssh_8.2p1.bb b/meta/recipes-connectivity/openssh/openssh_8.2p1.bb
index ddc9ed0b32..9d6cf7da6c 100644
--- a/meta/recipes-connectivity/openssh/openssh_8.2p1.bb
+++ b/meta/recipes-connectivity/openssh/openssh_8.2p1.bb
@@ -27,6 +27,20 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar
file://CVE-2020-14145.patch \
file://CVE-2021-28041.patch \
file://CVE-2021-41617.patch \
+ file://CVE-2023-38408-01.patch \
+ file://CVE-2023-38408-02.patch \
+ file://CVE-2023-38408-03.patch \
+ file://CVE-2023-38408-04.patch \
+ file://CVE-2023-38408-05.patch \
+ file://CVE-2023-38408-06.patch \
+ file://CVE-2023-38408-07.patch \
+ file://CVE-2023-38408-08.patch \
+ file://CVE-2023-38408-09.patch \
+ file://CVE-2023-38408-10.patch \
+ file://CVE-2023-38408-11.patch \
+ file://CVE-2023-38408-12.patch \
+ file://CVE-2023-48795.patch \
+ file://CVE-2023-51385.patch \
"
SRC_URI[md5sum] = "3076e6413e8dbe56d33848c1054ac091"
SRC_URI[sha256sum] = "43925151e6cf6cee1450190c0e9af4dc36b41c12737619edff8bcebdff64e671"
@@ -60,6 +74,13 @@ CVE_CHECK_WHITELIST += "CVE-2008-3844"
# https://ubuntu.com/security/CVE-2016-20012
CVE_CHECK_WHITELIST += "CVE-2016-20012"
+# As per Debian, the issue is fixed by a feature called "agent restriction" in openssh 8.9.
+# Urgency is unimportant as per Debian, hence this CVE is whitelisted.
+# https://security-tracker.debian.org/tracker/CVE-2021-36368
+# https://bugzilla.mindrot.org/show_bug.cgi?id=3316#c2
+# https://docs.ssh-mitm.at/trivialauth.html
+CVE_CHECK_WHITELIST += "CVE-2021-36368"
+
PAM_SRC_URI = "file://sshd"
inherit manpages useradd update-rc.d update-alternatives systemd
@@ -183,12 +204,17 @@ FILES_${PN}-sftp-server = "${libexecdir}/sftp-server"
FILES_${PN}-misc = "${bindir}/ssh* ${libexecdir}/ssh*"
FILES_${PN}-keygen = "${bindir}/ssh-keygen"
-RDEPENDS_${PN} += "${PN}-scp ${PN}-ssh ${PN}-sshd ${PN}-keygen"
+RDEPENDS_${PN} += "${PN}-scp ${PN}-ssh ${PN}-sshd ${PN}-keygen ${PN}-sftp-server"
RDEPENDS_${PN}-sshd += "${PN}-keygen ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-keyinit pam-plugin-loginuid', '', d)}"
RRECOMMENDS_${PN}-sshd_append_class-target = "\
${@bb.utils.filter('PACKAGECONFIG', 'rng-tools', d)} \
"
+# break dependency on base package for -dev package
+# otherwise SDK fails to build as the main openssh and dropbear packages
+# conflict with each other
+RDEPENDS_${PN}-dev = ""
+
# gdb would make attach-ptrace test pass rather than skip but not worth the build dependencies
RDEPENDS_${PN}-ptest += "${PN}-sftp ${PN}-misc ${PN}-sftp-server make sed sudo coreutils"
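With these recipe changes the patched and whitelisted issues should drop out of the cve-check report. A rough verification, assuming cve-check.bbclass is inherited for the build and that the per-recipe report lands in the usual deploy location for this branch:

    bitbake openssh -c cve_check
    # report path is an assumption; adjust to your build's DEPLOY_DIR layout
    grep -E 'CVE-2023-48795|CVE-2023-51385|CVE-2021-36368' tmp/deploy/cve/openssh*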
diff --git a/meta/recipes-connectivity/openssl/openssl/0001-Configure-add-2-missing-key-sorts.patch b/meta/recipes-connectivity/openssl/openssl/0001-Configure-add-2-missing-key-sorts.patch
new file mode 100644
index 0000000000..e2a65d0998
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/0001-Configure-add-2-missing-key-sorts.patch
@@ -0,0 +1,38 @@
+From 679ae2f72ef8cf37609cb0eff5de3b98aa85e395 Mon Sep 17 00:00:00 2001
+From: Steve Sakoman <steve@sakoman.com>
+Date: Thu, 20 Jul 2023 04:14:42 -1000
+Subject: [PATCH] Configure: add 2 missing key sorts in generation of unified_info
+
+Otherwise generation of this section in configdata.pm is not reproducible
+
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+Upstream-Status: Backport [adapted from 3.x commit https://github.com/openssl/openssl/commit/764cf5b26306a8712e8b3d41599c44dc5ed07a25]
+---
+ Configure | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Configure b/Configure
+index 2a01746..8fc5a2c 100755
+--- a/Configure
++++ b/Configure
+@@ -2326,7 +2326,7 @@ EOF
+ "dso" => [ @{$unified_info{engines}} ],
+ "bin" => [ @{$unified_info{programs}} ],
+ "script" => [ @{$unified_info{scripts}} ] );
+- foreach my $type (keys %loopinfo) {
++ foreach my $type (sort keys %loopinfo) {
+ foreach my $product (@{$loopinfo{$type}}) {
+ my %dirs = ();
+ my $pd = dirname($product);
+@@ -2347,7 +2347,7 @@ EOF
+ push @{$unified_info{dirinfo}->{$d}->{deps}}, $_
+ if $d ne $pd;
+ }
+- foreach (keys %dirs) {
++ foreach (sort keys %dirs) {
+ push @{$unified_info{dirinfo}->{$_}->{products}->{$type}},
+ $product;
+ }
+--
+2.34.1
+
diff --git a/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch b/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch
new file mode 100644
index 0000000000..b3f6a942d5
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/0001-Configure-do-not-tweak-mips-cflags.patch
@@ -0,0 +1,37 @@
+From 326909baf81a638d51fa8be1d8227518784f5cc4 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex@linutronix.de>
+Date: Tue, 14 Sep 2021 12:18:25 +0200
+Subject: [PATCH] Configure: do not tweak mips cflags
+
+This conflicts with mips machine definitions from Yocto,
+e.g.
+| Error: -mips3 conflicts with the other architecture options, which imply -mips64r2
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex@linutronix.de>
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ Configure | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+Index: openssl-3.0.4/Configure
+===================================================================
+--- openssl-3.0.4.orig/Configure
++++ openssl-3.0.4/Configure
+@@ -1243,16 +1243,6 @@ if ($target =~ /^mingw/ && `$config{CC} --target-help 2>&1` =~ m/-mno-cygwin/m)
+ push @{$config{shared_ldflag}}, "-mno-cygwin";
+ }
+
+-if ($target =~ /linux.*-mips/ && !$disabled{asm}
+- && !grep { $_ =~ /-m(ips|arch=)/ } (@{$config{CFLAGS}})) {
+- # minimally required architecture flags for assembly modules
+- my $value;
+- $value = '-mips2' if ($target =~ /mips32/);
+- $value = '-mips3' if ($target =~ /mips64/);
+- unshift @{$config{cflags}}, $value;
+- unshift @{$config{cxxflags}}, $value if $config{CXX};
+-}
+-
+ # If threads aren't disabled, check how possible they are
+ unless ($disabled{threads}) {
+ if ($auto_threads) {
diff --git a/meta/recipes-connectivity/openssl/openssl/CVE-2024-0727.patch b/meta/recipes-connectivity/openssl/openssl/CVE-2024-0727.patch
new file mode 100644
index 0000000000..3da6879ccb
--- /dev/null
+++ b/meta/recipes-connectivity/openssl/openssl/CVE-2024-0727.patch
@@ -0,0 +1,122 @@
+Backport of:
+
+From 09df4395b5071217b76dc7d3d2e630eb8c5a79c2 Mon Sep 17 00:00:00 2001
+From: Matt Caswell <matt@openssl.org>
+Date: Fri, 19 Jan 2024 11:28:58 +0000
+Subject: [PATCH] Add NULL checks where ContentInfo data can be NULL
+
+PKCS12 structures contain PKCS7 ContentInfo fields. These fields are
+optional and can be NULL even if the "type" is a valid value. OpenSSL
+was not properly accounting for this and a NULL dereference can occur
+causing a crash.
+
+CVE-2024-0727
+
+Reviewed-by: Tomas Mraz <tomas@openssl.org>
+Reviewed-by: Hugo Landau <hlandau@openssl.org>
+Reviewed-by: Neil Horman <nhorman@openssl.org>
+(Merged from https://github.com/openssl/openssl/pull/23362)
+
+(cherry picked from commit d135eeab8a5dbf72b3da5240bab9ddb7678dbd2c)
+
+Upstream-Status: Backport [https://github.com/openssl/openssl/commit/d135eeab8a5dbf72b3da5240bab9ddb7678dbd2c]
+
+CVE: CVE-2024-0727
+
+Signed-off-by: virendra thakur <virendrak@kpit.com>
+---
+ crypto/pkcs12/p12_add.c | 18 ++++++++++++++++++
+ crypto/pkcs12/p12_mutl.c | 5 +++++
+ crypto/pkcs12/p12_npas.c | 5 +++--
+ crypto/pkcs7/pk7_mime.c | 7 +++++--
+ 4 files changed, 31 insertions(+), 4 deletions(-)
+
+--- a/crypto/pkcs12/p12_add.c
++++ b/crypto/pkcs12/p12_add.c
+@@ -76,6 +76,13 @@ STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_
+ PKCS12_R_CONTENT_TYPE_NOT_DATA);
+ return NULL;
+ }
++
++ if (p7->d.data == NULL) {
++ PKCS12err(PKCS12_F_PKCS12_UNPACK_P7DATA,
++ PKCS12_R_DECODE_ERROR);
++ return NULL;
++ }
++
+ return ASN1_item_unpack(p7->d.data, ASN1_ITEM_rptr(PKCS12_SAFEBAGS));
+ }
+
+@@ -132,6 +139,12 @@ STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_
+ {
+ if (!PKCS7_type_is_encrypted(p7))
+ return NULL;
++
++ if (p7->d.encrypted == NULL) {
++ PKCS12err(PKCS12_F_PKCS12_UNPACK_P7DATA, PKCS12_R_DECODE_ERROR);
++ return NULL;
++ }
++
+ return PKCS12_item_decrypt_d2i(p7->d.encrypted->enc_data->algorithm,
+ ASN1_ITEM_rptr(PKCS12_SAFEBAGS),
+ pass, passlen,
+@@ -159,6 +172,13 @@ STACK_OF(PKCS7) *PKCS12_unpack_authsafes
+ PKCS12_R_CONTENT_TYPE_NOT_DATA);
+ return NULL;
+ }
++
++ if (p12->authsafes->d.data == NULL) {
++ PKCS12err(PKCS12_F_PKCS12_UNPACK_AUTHSAFES,
++ PKCS12_R_DECODE_ERROR);
++ return NULL;
++ }
++
+ return ASN1_item_unpack(p12->authsafes->d.data,
+ ASN1_ITEM_rptr(PKCS12_AUTHSAFES));
+ }
+--- a/crypto/pkcs12/p12_mutl.c
++++ b/crypto/pkcs12/p12_mutl.c
+@@ -93,6 +93,11 @@ static int pkcs12_gen_mac(PKCS12 *p12, c
+ return 0;
+ }
+
++ if (p12->authsafes->d.data == NULL) {
++ PKCS12err(PKCS12_F_PKCS12_GEN_MAC, PKCS12_R_DECODE_ERROR);
++ return 0;
++ }
++
+ salt = p12->mac->salt->data;
+ saltlen = p12->mac->salt->length;
+ if (!p12->mac->iter)
+--- a/crypto/pkcs12/p12_npas.c
++++ b/crypto/pkcs12/p12_npas.c
+@@ -78,8 +78,9 @@ static int newpass_p12(PKCS12 *p12, cons
+ bags = PKCS12_unpack_p7data(p7);
+ } else if (bagnid == NID_pkcs7_encrypted) {
+ bags = PKCS12_unpack_p7encdata(p7, oldpass, -1);
+- if (!alg_get(p7->d.encrypted->enc_data->algorithm,
+- &pbe_nid, &pbe_iter, &pbe_saltlen))
++ if (p7->d.encrypted == NULL
++ || !alg_get(p7->d.encrypted->enc_data->algorithm,
++ &pbe_nid, &pbe_iter, &pbe_saltlen))
+ goto err;
+ } else {
+ continue;
+--- a/crypto/pkcs7/pk7_mime.c
++++ b/crypto/pkcs7/pk7_mime.c
+@@ -30,10 +30,13 @@ int SMIME_write_PKCS7(BIO *bio, PKCS7 *p
+ {
+ STACK_OF(X509_ALGOR) *mdalgs;
+ int ctype_nid = OBJ_obj2nid(p7->type);
+- if (ctype_nid == NID_pkcs7_signed)
++ if (ctype_nid == NID_pkcs7_signed) {
++ if (p7->d.sign == NULL)
++ return 0;
+ mdalgs = p7->d.sign->md_algs;
+- else
++ } else {
+ mdalgs = NULL;
++ }
+
+ flags ^= SMIME_OLDMIME;
+
diff --git a/meta/recipes-connectivity/openssl/openssl_1.1.1n.bb b/meta/recipes-connectivity/openssl/openssl_1.1.1w.bb
index 8538bd5a18..0e490eabc3 100644
--- a/meta/recipes-connectivity/openssl/openssl_1.1.1n.bb
+++ b/meta/recipes-connectivity/openssl/openssl_1.1.1w.bb
@@ -18,13 +18,16 @@ SRC_URI = "http://www.openssl.org/source/openssl-${PV}.tar.gz \
file://afalg.patch \
file://reproducible.patch \
file://reproducibility.patch \
+ file://0001-Configure-add-2-missing-key-sorts.patch \
+ file://0001-Configure-do-not-tweak-mips-cflags.patch \
+ file://CVE-2024-0727.patch \
"
SRC_URI_append_class-nativesdk = " \
file://environment.d-openssl.sh \
"
-SRC_URI[sha256sum] = "40dceb51a4f6a5275bde0e6bf20ef4b91bfc32ed57c0552e2e8e15463372b17a"
+SRC_URI[sha256sum] = "cf3098950cb4d853ad95c0841f1f9c6d3dc102dccfcacd521d93925208b76ac8"
inherit lib_package multilib_header multilib_script ptest
MULTILIB_SCRIPTS = "${PN}-bin:${bindir}/c_rehash"
diff --git a/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch b/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch
new file mode 100644
index 0000000000..27b8863a4e
--- /dev/null
+++ b/meta/recipes-connectivity/ppp/ppp/CVE-2022-4603.patch
@@ -0,0 +1,50 @@
+From 2aeb41a9a3a43b11b1e46628d0bf98197ff9f141 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Thu, 29 Dec 2022 18:00:20 +0100
+Subject: [PATCH] pppdump: Avoid out-of-range access to packet buffer
+
+This fixes a potential vulnerability where data is written to spkt.buf
+and rpkt.buf without a check on the array index. To fix this, we
+check the array index (pkt->cnt) before storing the byte or
+incrementing the count. This also means we no longer have a potential
+signed integer overflow on the increment of pkt->cnt.
+
+Fortunately, pppdump is not used in the normal process of setting up a
+PPP connection, is not installed setuid-root, and is not invoked
+automatically in any scenario that I am aware of.
+
+Upstream-Status: Backport [https://github.com/ppp-project/ppp/commit/a75fb7b198eed50d769c80c36629f38346882cbf]
+CVE: CVE-2022-4603
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+---
+ pppdump/pppdump.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/pppdump/pppdump.c b/pppdump/pppdump.c
+index 87c2e8f..dec4def 100644
+--- a/pppdump/pppdump.c
++++ b/pppdump/pppdump.c
+@@ -296,6 +296,10 @@ dumpppp(f)
+ printf("%s aborted packet:\n ", dir);
+ q = " ";
+ }
++ if (pkt->cnt >= sizeof(pkt->buf)) {
++ printf("%s over-long packet truncated:\n ", dir);
++ q = " ";
++ }
+ nb = pkt->cnt;
+ p = pkt->buf;
+ pkt->cnt = 0;
+@@ -399,7 +403,8 @@ dumpppp(f)
+ c ^= 0x20;
+ pkt->esc = 0;
+ }
+- pkt->buf[pkt->cnt++] = c;
++ if (pkt->cnt < sizeof(pkt->buf))
++ pkt->buf[pkt->cnt++] = c;
+ break;
+ }
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-connectivity/ppp/ppp_2.4.7.bb b/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
index 76c1cc62a7..51ec25e660 100644
--- a/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
+++ b/meta/recipes-connectivity/ppp/ppp_2.4.7.bb
@@ -34,6 +34,7 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/${BP}.tar.gz \
file://0001-ppp-Remove-unneeded-include.patch \
file://ppp-2.4.7-DES-openssl.patch \
file://0001-pppd-Fix-bounds-check-in-EAP-code.patch \
+ file://CVE-2022-4603.patch \
"
SRC_URI_append_libc-musl = "\
diff --git a/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb b/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb
index f482bd297f..5f0a5eac70 100644
--- a/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb
+++ b/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb
@@ -11,7 +11,7 @@ AUTHOR = "Thomas Hood"
HOMEPAGE = "http://packages.debian.org/resolvconf"
RDEPENDS_${PN} = "bash"
-SRC_URI = "git://salsa.debian.org/debian/resolvconf.git;protocol=https;branch=master \
+SRC_URI = "git://salsa.debian.org/debian/resolvconf.git;protocol=https;branch=unstable \
file://fix-path-for-busybox.patch \
file://99_resolvconf \
"
diff --git a/meta/recipes-core/base-files/base-files/hosts b/meta/recipes-core/base-files/base-files/hosts
index b94f414d5c..10a5b6c704 100644
--- a/meta/recipes-core/base-files/base-files/hosts
+++ b/meta/recipes-core/base-files/base-files/hosts
@@ -1,4 +1,4 @@
-127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
diff --git a/meta/recipes-core/busybox/busybox.inc b/meta/recipes-core/busybox/busybox.inc
index 3553376582..f0c5666f47 100644
--- a/meta/recipes-core/busybox/busybox.inc
+++ b/meta/recipes-core/busybox/busybox.inc
@@ -139,6 +139,10 @@ do_configure () {
do_prepare_config
merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
cml1_do_configure
+
+ # Save a copy of .config and autoconf.h.
+ cp .config .config.orig
+ cp include/autoconf.h include/autoconf.h.orig
}
do_compile() {
@@ -146,13 +150,17 @@ do_compile() {
if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
export KCONFIG_NOTIMESTAMP=1
fi
+
+ # Ensure we start do_compile with the original .config and autoconf.h.
+ # These files should always have matching timestamps.
+ cp .config.orig .config
+ cp include/autoconf.h.orig include/autoconf.h
+
if [ "${BUSYBOX_SPLIT_SUID}" = "1" -a x`grep "CONFIG_FEATURE_INDIVIDUAL=y" .config` = x ]; then
+	# Guard against interrupted do_compile: clean temporary files.
+ rm -f .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
+
# split the .config into two parts, and make two busybox binaries
- if [ -e .config.orig ]; then
- # Need to guard again an interrupted do_compile - restore any backup
- cp .config.orig .config
- fi
- cp .config .config.orig
oe_runmake busybox.cfg.suid
oe_runmake busybox.cfg.nosuid
@@ -189,15 +197,18 @@ do_compile() {
bbfatal "busybox suid binary incorrectly provides /bin/sh"
fi
- # copy .config.orig back to .config, because the install process may check this file
- cp .config.orig .config
# cleanup
- rm .config.orig .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
+ rm .config.app.suid .config.app.nosuid .config.disable.apps .config.nonapps
else
oe_runmake busybox_unstripped
cp busybox_unstripped busybox
oe_runmake busybox.links
fi
+
+ # restore original .config and autoconf.h, because the install process
+ # may check these files
+ cp .config.orig .config
+ cp include/autoconf.h.orig include/autoconf.h
}
do_install () {
diff --git a/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch b/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch
new file mode 100644
index 0000000000..dfba2a7e0f
--- /dev/null
+++ b/meta/recipes-core/busybox/busybox/CVE-2022-48174.patch
@@ -0,0 +1,82 @@
+From c18ebf861528ef24958dd99a146482d2a40014c7 Mon Sep 17 00:00:00 2001
+From: Denys Vlasenko <vda.linux@googlemail.com>
+Date: Mon, 12 Jun 2023 17:48:47 +0200
+Subject: [PATCH] shell: avoid segfault on ${0::0/0~09J}. Closes 15216
+
+function old new delta
+evaluate_string 1011 1053 +42
+
+CVE: CVE-2022-48174
+Upstream-Status: Backport [d417193cf37ca1005830d7e16f5fa7e1d8a44209]
+Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
+---
+ shell/math.c | 39 +++++++++++++++++++++++++++++++++++----
+ 1 file changed, 35 insertions(+), 4 deletions(-)
+
+diff --git a/shell/math.c b/shell/math.c
+index af1ab55c0..79824e81f 100644
+--- a/shell/math.c
++++ b/shell/math.c
+@@ -578,6 +578,28 @@ static arith_t strto_arith_t(const char *nptr, char **endptr)
+ # endif
+ #endif
+
++//TODO: much better estimation than expr_len/2? Such as:
++//static unsigned estimate_nums_and_names(const char *expr)
++//{
++// unsigned count = 0;
++// while (*(expr = skip_whitespace(expr)) != '\0') {
++// const char *p;
++// if (isdigit(*expr)) {
++// while (isdigit(*++expr))
++// continue;
++// count++;
++// continue;
++// }
++// p = endofname(expr);
++// if (p != expr) {
++// expr = p;
++// count++;
++// continue;
++// }
++// }
++// return count;
++//}
++
+ static arith_t FAST_FUNC
+ evaluate_string(arith_state_t *math_state, const char *expr)
+ {
+@@ -585,10 +607,12 @@ evaluate_string(arith_state_t *math_state, const char *expr)
+ const char *errmsg;
+ const char *start_expr = expr = skip_whitespace(expr);
+ unsigned expr_len = strlen(expr) + 2;
+- /* Stack of integers */
+- /* The proof that there can be no more than strlen(startbuf)/2+1
+- * integers in any given correct or incorrect expression
+- * is left as an exercise to the reader. */
++ /* Stack of integers/names */
++ /* There can be no more than strlen(startbuf)/2+1
++ * integers/names in any given correct or incorrect expression.
++ * (modulo "09v09v09v09v09v" case,
++ * but we have code to detect that early)
++ */
+ var_or_num_t *const numstack = alloca((expr_len / 2) * sizeof(numstack[0]));
+ var_or_num_t *numstackptr = numstack;
+ /* Stack of operator tokens */
+@@ -657,6 +681,13 @@ evaluate_string(arith_state_t *math_state, const char *expr)
+ numstackptr->var = NULL;
+ errno = 0;
+ numstackptr->val = strto_arith_t(expr, (char**) &expr);
++ /* A number can't be followed by another number, or a variable name.
++ * We'd catch this later anyway, but this would require numstack[]
++ * to be twice as deep to handle strings where _every_ char is
++ * a new number or name. Example: 09v09v09v09v09v09v09v09v09v
++ */
++ if (isalnum(*expr) || *expr == '_')
++ goto err;
+ if (errno)
+ numstackptr->val = 0; /* bash compat */
+ goto num;
+--
+2.40.1
+
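The expression in the commit subject is the reported reproducer; with the fix, ash's arithmetic parser is expected to fail with an error instead of corrupting the number stack. A minimal on-target check (exact error wording is an assumption):

    busybox ash -c 'echo ${0::0/0~09J}'       # reproducer from the commit subject; should error, not crash
    busybox ash -c ': $((09v09v09v09v09v))'   # the number-followed-by-name case the fix now rejects early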
diff --git a/meta/recipes-core/busybox/busybox_1.31.1.bb b/meta/recipes-core/busybox/busybox_1.31.1.bb
index d062f0f7dd..94aa1467df 100644
--- a/meta/recipes-core/busybox/busybox_1.31.1.bb
+++ b/meta/recipes-core/busybox/busybox_1.31.1.bb
@@ -55,6 +55,7 @@ SRC_URI = "https://busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \
file://CVE-2021-42374.patch \
file://CVE-2021-42376.patch \
file://CVE-2021-423xx-awk.patch \
+ file://CVE-2022-48174.patch \
file://0001-libbb-sockaddr2str-ensure-only-printable-characters-.patch \
file://0002-nslookup-sanitize-all-printed-strings-with-printable.patch \
"
diff --git a/meta/recipes-core/coreutils/coreutils_8.31.bb b/meta/recipes-core/coreutils/coreutils_8.31.bb
index 3d569881e8..3841f71155 100644
--- a/meta/recipes-core/coreutils/coreutils_8.31.bb
+++ b/meta/recipes-core/coreutils/coreutils_8.31.bb
@@ -51,6 +51,7 @@ PACKAGECONFIG_class-nativesdk ??= "xattr"
PACKAGECONFIG[acl] = "--enable-acl,--disable-acl,acl,"
PACKAGECONFIG[xattr] = "--enable-xattr,--disable-xattr,attr,"
PACKAGECONFIG[single-binary] = "--enable-single-binary,--disable-single-binary,,"
+PACKAGECONFIG[openssl] = "--with-openssl=yes,--with-openssl=no,openssl"
# [ df mktemp nice printenv base64 gets a special treatment and is not included in this
bindir_progs = "arch basename chcon cksum comm csplit cut dir dircolors dirname du \
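The new openssl PACKAGECONFIG is not enabled by any default shown in this hunk; one way to switch it on for coreutils from a build's local.conf, using the override syntax this branch uses elsewhere (illustrative):

    cat >> conf/local.conf <<'EOF'
    PACKAGECONFIG_append_pn-coreutils = " openssl"
    EOF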
diff --git a/meta/recipes-core/dbus/dbus-test_1.12.20.bb b/meta/recipes-core/dbus/dbus-test_1.12.24.bb
index 755c841bad..755c841bad 100644
--- a/meta/recipes-core/dbus/dbus-test_1.12.20.bb
+++ b/meta/recipes-core/dbus/dbus-test_1.12.24.bb
diff --git a/meta/recipes-core/dbus/dbus.inc b/meta/recipes-core/dbus/dbus.inc
index dcbcc0a9d6..9b5cc53d92 100644
--- a/meta/recipes-core/dbus/dbus.inc
+++ b/meta/recipes-core/dbus/dbus.inc
@@ -8,10 +8,10 @@ SRC_URI = "https://dbus.freedesktop.org/releases/dbus/dbus-${PV}.tar.gz \
file://tmpdir.patch \
file://dbus-1.init \
file://clear-guid_from_server-if-send_negotiate_unix_f.patch \
+ file://CVE-2023-34969.patch \
"
-SRC_URI[md5sum] = "dfe8a71f412e0b53be26ed4fbfdc91c4"
-SRC_URI[sha256sum] = "f77620140ecb4cdc67f37fb444f8a6bea70b5b6461f12f1cbe2cec60fa7de5fe"
+SRC_URI[sha256sum] = "bc42d196c1756ac520d61bf3ccd6f42013617def45dd1e591a6091abf51dca38"
EXTRA_OECONF = "--disable-xml-docs \
--disable-doxygen-docs \
@@ -32,3 +32,5 @@ PACKAGECONFIG[systemd] = "--enable-systemd --with-systemdsystemunitdir=${systemd
PACKAGECONFIG[x11] = "--with-x --enable-x11-autolaunch,--without-x --disable-x11-autolaunch, virtual/libx11 libsm"
PACKAGECONFIG[user-session] = "--enable-user-session --with-systemduserunitdir=${systemd_user_unitdir},--disable-user-session"
PACKAGECONFIG[verbose-mode] = "--enable-verbose-mode,,,"
+
+CVE_PRODUCT += "d-bus_project:d-bus freedesktop:dbus freedesktop:libdbus"
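CVE_PRODUCT entries may carry a vendor: prefix so cve-check matches the intended CPEs for dbus. A quick way to confirm the value a build will actually use:

    bitbake -e dbus | grep '^CVE_PRODUCT='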
diff --git a/meta/recipes-core/dbus/dbus/CVE-2023-34969.patch b/meta/recipes-core/dbus/dbus/CVE-2023-34969.patch
new file mode 100644
index 0000000000..8f29185cf6
--- /dev/null
+++ b/meta/recipes-core/dbus/dbus/CVE-2023-34969.patch
@@ -0,0 +1,96 @@
+From 37a4dc5835731a1f7a81f1b67c45b8dfb556dd1c Mon Sep 17 00:00:00 2001
+From: hongjinghao <q1204531485@163.com>
+Date: Mon, 5 Jun 2023 18:17:06 +0100
+Subject: [PATCH] bus: Assign a serial number for messages from the driver
+
+Normally, it's enough to rely on a message being given a serial number
+by the DBusConnection just before it is actually sent. However, in the
+rare case where the policy blocks the driver from sending a message
+(due to a deny rule or the outgoing message quota being full), we need
+to get a valid serial number sooner, so that we can copy it into the
+DBUS_HEADER_FIELD_REPLY_SERIAL field (which is mandatory) in the error
+message sent to monitors. Otherwise, the dbus-daemon will crash with
+an assertion failure if at least one Monitoring client is attached,
+because zero is not a valid serial number to copy.
+
+This fixes a denial-of-service vulnerability: if a privileged user is
+monitoring the well-known system bus using a Monitoring client like
+dbus-monitor or `busctl monitor`, then an unprivileged user can cause
+denial-of-service by triggering this crash. A mitigation for this
+vulnerability is to avoid attaching Monitoring clients to the system
+bus when they are not needed. If there are no Monitoring clients, then
+the vulnerable code is not reached.
+
+Co-authored-by: Simon McVittie <smcv@collabora.com>
+Resolves: dbus/dbus#457
+(cherry picked from commit b159849e031000d1dbc1ab876b5fc78a3ce9b534)
+---
+ bus/connection.c | 15 +++++++++++++++
+ dbus/dbus-connection-internal.h | 2 ++
+ dbus/dbus-connection.c | 11 ++++++++++-
+ 3 files changed, 27 insertions(+), 1 deletion(-)
+
+diff --git a/bus/connection.c b/bus/connection.c
+index b3583433..215f0230 100644
+--- a/bus/connection.c
++++ b/bus/connection.c
+@@ -2350,6 +2350,21 @@ bus_transaction_send_from_driver (BusTransaction *transaction,
+ if (!dbus_message_set_sender (message, DBUS_SERVICE_DBUS))
+ return FALSE;
+
++ /* Make sure the message has a non-zero serial number, otherwise
++ * bus_transaction_capture_error_reply() will not be able to mock up
++ * a corresponding reply for it. Normally this would be delayed until
++ * the first time we actually send the message out from a
++ * connection, when the transaction is committed, but that's too late
++ * in this case.
++ */
++ if (dbus_message_get_serial (message) == 0)
++ {
++ dbus_uint32_t next_serial;
++
++ next_serial = _dbus_connection_get_next_client_serial (connection);
++ dbus_message_set_serial (message, next_serial);
++ }
++
+ if (bus_connection_is_active (connection))
+ {
+ if (!dbus_message_set_destination (message,
+diff --git a/dbus/dbus-connection-internal.h b/dbus/dbus-connection-internal.h
+index 48357321..ba79b192 100644
+--- a/dbus/dbus-connection-internal.h
++++ b/dbus/dbus-connection-internal.h
+@@ -54,6 +54,8 @@ DBUS_PRIVATE_EXPORT
+ DBusConnection * _dbus_connection_ref_unlocked (DBusConnection *connection);
+ DBUS_PRIVATE_EXPORT
+ void _dbus_connection_unref_unlocked (DBusConnection *connection);
++DBUS_PRIVATE_EXPORT
++dbus_uint32_t _dbus_connection_get_next_client_serial (DBusConnection *connection);
+ void _dbus_connection_queue_received_message_link (DBusConnection *connection,
+ DBusList *link);
+ dbus_bool_t _dbus_connection_has_messages_to_send_unlocked (DBusConnection *connection);
+diff --git a/dbus/dbus-connection.c b/dbus/dbus-connection.c
+index c525b6dc..09cef278 100644
+--- a/dbus/dbus-connection.c
++++ b/dbus/dbus-connection.c
+@@ -1456,7 +1456,16 @@ _dbus_connection_unref_unlocked (DBusConnection *connection)
+ _dbus_connection_last_unref (connection);
+ }
+
+-static dbus_uint32_t
++/**
++ * Allocate and return the next non-zero serial number for outgoing messages.
++ *
++ * This method is only valid to call from single-threaded code, such as
++ * the dbus-daemon, or with the connection lock held.
++ *
++ * @param connection the connection
++ * @returns A suitable serial number for the next message to be sent on the connection.
++ */
++dbus_uint32_t
+ _dbus_connection_get_next_client_serial (DBusConnection *connection)
+ {
+ dbus_uint32_t serial;
+--
+2.25.1
+
diff --git a/meta/recipes-core/dbus/dbus_1.12.20.bb b/meta/recipes-core/dbus/dbus_1.12.24.bb
index cf6f7dc0ef..cf6f7dc0ef 100644
--- a/meta/recipes-core/dbus/dbus_1.12.20.bb
+++ b/meta/recipes-core/dbus/dbus_1.12.24.bb
diff --git a/meta/recipes-core/dropbear/dropbear.inc b/meta/recipes-core/dropbear/dropbear.inc
index b949a9a337..0f5e9ba4ac 100644
--- a/meta/recipes-core/dropbear/dropbear.inc
+++ b/meta/recipes-core/dropbear/dropbear.inc
@@ -12,6 +12,11 @@ DEPENDS = "zlib virtual/crypt"
RPROVIDES_${PN} = "ssh sshd"
RCONFLICTS_${PN} = "openssh-sshd openssh"
+# break dependency on base package for -dev package
+# otherwise SDK fails to build as the main openssh and dropbear packages
+# conflict with each other
+RDEPENDS_${PN}-dev = ""
+
DEPENDS += "${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}"
SRC_URI = "http://matt.ucc.asn.au/dropbear/releases/dropbear-${PV}.tar.bz2 \
@@ -24,6 +29,7 @@ SRC_URI = "http://matt.ucc.asn.au/dropbear/releases/dropbear-${PV}.tar.bz2 \
${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \
${@bb.utils.contains('PACKAGECONFIG', 'disable-weak-ciphers', 'file://dropbear-disable-weak-ciphers.patch', '', d)} \
file://CVE-2020-36254.patch \
+ file://CVE-2021-36369.patch \
"
PAM_SRC_URI = "file://0005-dropbear-enable-pam.patch \
diff --git a/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch b/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch
new file mode 100644
index 0000000000..5cabe8339d
--- /dev/null
+++ b/meta/recipes-core/dropbear/dropbear/CVE-2021-36369.patch
@@ -0,0 +1,145 @@
+From e10dec82930863e487b22978d3df107274f366b2 Mon Sep 17 00:00:00 2001
+From: Manfred Kaiser <37737811+manfred-kaiser@users.noreply.github.com>
+Date: Thu, 19 Aug 2021 17:37:14 +0200
+Subject: [PATCH] added option to disable trivial auth methods (#128)
+
+* added option to disable trivial auth methods
+
+* rename argument to match with other ssh clients
+
+* fixed trivial auth detection for pubkeys
+
+[https://github.com/mkj/dropbear/pull/128]
+Upstream-Status: Backport
+CVE: CVE-2021-36369
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ cli-auth.c | 3 +++
+ cli-authinteract.c | 1 +
+ cli-authpasswd.c | 2 +-
+ cli-authpubkey.c | 1 +
+ cli-runopts.c | 7 +++++++
+ cli-session.c | 1 +
+ runopts.h | 1 +
+ session.h | 1 +
+ 8 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/cli-auth.c b/cli-auth.c
+index 2e509e5..6f04495 100644
+--- a/cli-auth.c
++++ b/cli-auth.c
+@@ -267,6 +267,9 @@ void recv_msg_userauth_success() {
+ if DROPBEAR_CLI_IMMEDIATE_AUTH is set */
+
+ TRACE(("received msg_userauth_success"))
++ if (cli_opts.disable_trivial_auth && cli_ses.is_trivial_auth) {
++ dropbear_exit("trivial authentication not allowed");
++ }
+ /* Note: in delayed-zlib mode, setting authdone here
+ * will enable compression in the transport layer */
+ ses.authstate.authdone = 1;
+diff --git a/cli-authinteract.c b/cli-authinteract.c
+index e1cc9a1..f7128ee 100644
+--- a/cli-authinteract.c
++++ b/cli-authinteract.c
+@@ -114,6 +114,7 @@ void recv_msg_userauth_info_request() {
+ m_free(instruction);
+
+ for (i = 0; i < num_prompts; i++) {
++ cli_ses.is_trivial_auth = 0;
+ unsigned int response_len = 0;
+ prompt = buf_getstring(ses.payload, NULL);
+ cleantext(prompt);
+diff --git a/cli-authpasswd.c b/cli-authpasswd.c
+index 00fdd8b..a24d43e 100644
+--- a/cli-authpasswd.c
++++ b/cli-authpasswd.c
+@@ -155,7 +155,7 @@ void cli_auth_password() {
+
+ encrypt_packet();
+ m_burn(password, strlen(password));
+-
++ cli_ses.is_trivial_auth = 0;
+ TRACE(("leave cli_auth_password"))
+ }
+ #endif /* DROPBEAR_CLI_PASSWORD_AUTH */
+diff --git a/cli-authpubkey.c b/cli-authpubkey.c
+index 7cee164..7da1a04 100644
+--- a/cli-authpubkey.c
++++ b/cli-authpubkey.c
+@@ -174,6 +174,7 @@ static void send_msg_userauth_pubkey(sign_key *key, int type, int realsign) {
+ buf_putbytes(sigbuf, ses.writepayload->data, ses.writepayload->len);
+ cli_buf_put_sign(ses.writepayload, key, type, sigbuf);
+ buf_free(sigbuf); /* Nothing confidential in the buffer */
++ cli_ses.is_trivial_auth = 0;
+ }
+
+ encrypt_packet();
+diff --git a/cli-runopts.c b/cli-runopts.c
+index 7d1fffe..6bf8b8e 100644
+--- a/cli-runopts.c
++++ b/cli-runopts.c
+@@ -152,6 +152,7 @@ void cli_getopts(int argc, char ** argv) {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ cli_opts.exit_on_fwd_failure = 0;
+ #endif
++ cli_opts.disable_trivial_auth = 0;
+ #if DROPBEAR_CLI_LOCALTCPFWD
+ cli_opts.localfwds = list_new();
+ opts.listen_fwd_all = 0;
+@@ -888,6 +889,7 @@ static void add_extendedopt(const char* origstr) {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ "\tExitOnForwardFailure\n"
+ #endif
++ "\tDisableTrivialAuth\n"
+ #ifndef DISABLE_SYSLOG
+ "\tUseSyslog\n"
+ #endif
+@@ -915,5 +917,10 @@ static void add_extendedopt(const char* origstr) {
+ return;
+ }
+
++ if (match_extendedopt(&optstr, "DisableTrivialAuth") == DROPBEAR_SUCCESS) {
++ cli_opts.disable_trivial_auth = parse_flag_value(optstr);
++ return;
++ }
++
+ dropbear_log(LOG_WARNING, "Ignoring unknown configuration option '%s'", origstr);
+ }
+diff --git a/cli-session.c b/cli-session.c
+index 56dd4af..73ef0db 100644
+--- a/cli-session.c
++++ b/cli-session.c
+@@ -164,6 +164,7 @@ static void cli_session_init(pid_t proxy_cmd_pid) {
+ /* Auth */
+ cli_ses.lastprivkey = NULL;
+ cli_ses.lastauthtype = 0;
++ cli_ses.is_trivial_auth = 1;
+
+ /* For printing "remote host closed" for the user */
+ ses.remoteclosed = cli_remoteclosed;
+diff --git a/runopts.h b/runopts.h
+index 31eae1f..8519626 100644
+--- a/runopts.h
++++ b/runopts.h
+@@ -154,6 +154,7 @@ typedef struct cli_runopts {
+ #if DROPBEAR_CLI_ANYTCPFWD
+ int exit_on_fwd_failure;
+ #endif
++ int disable_trivial_auth;
+ #if DROPBEAR_CLI_REMOTETCPFWD
+ m_list * remotefwds;
+ #endif
+diff --git a/session.h b/session.h
+index 0f77055..8676054 100644
+--- a/session.h
++++ b/session.h
+@@ -287,6 +287,7 @@ struct clientsession {
+
+ int lastauthtype; /* either AUTH_TYPE_PUBKEY or AUTH_TYPE_PASSWORD,
+ for the last type of auth we tried */
++ int is_trivial_auth;
+ int ignore_next_auth_response;
+ #if DROPBEAR_CLI_INTERACT_AUTH
+ int auth_interact_failed; /* flag whether interactive auth can still
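The new behaviour is opt-in: cli_opts.disable_trivial_auth defaults to 0 above, so clients must request the check. Assuming dropbear's usual -o handling for extended options, an invocation would look roughly like:

    dbclient -o DisableTrivialAuth=yes user@host.example.com   # host and user are placeholders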
diff --git a/meta/recipes-core/expat/expat/CVE-2022-40674.patch b/meta/recipes-core/expat/expat/CVE-2022-40674.patch
new file mode 100644
index 0000000000..8b95f5f198
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-40674.patch
@@ -0,0 +1,53 @@
+From 4a32da87e931ba54393d465bb77c40b5c33d343b Mon Sep 17 00:00:00 2001
+From: Rhodri James <rhodri@wildebeest.org.uk>
+Date: Wed, 17 Aug 2022 18:26:18 +0100
+Subject: [PATCH] Ensure raw tagnames are safe exiting internalEntityParser
+
+It is possible to concoct a situation in which parsing is
+suspended while substituting in an internal entity, so that
+XML_ResumeParser directly uses internalEntityProcessor as
+its processor. If the subsequent parse includes some unclosed
+tags, this will return without calling storeRawNames to ensure
+that the raw versions of the tag names are stored in memory other
+than the parse buffer itself. If the parse buffer is then changed
+or reallocated (for example if processing a file line by line),
+badness will ensue.
+
+This patch ensures storeRawNames is always called when needed
+after calling doContent. The earlier call do doContent does
+not need the same protection; it only deals with entity
+substitution, which cannot leave unbalanced tags, and in any
+case the raw names will be pointing into the stored entity
+value not the parse buffer.
+
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/4a32da87e931ba54393d465bb77c40b5c33d343b]
+CVE: CVE-2022-40674
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+---
+ expat/lib/xmlparse.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+Index: expat/lib/xmlparse.c
+===================================================================
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -5657,10 +5657,15 @@ internalEntityProcessor(XML_Parser parse
+ {
+ parser->m_processor = contentProcessor;
+ /* see externalEntityContentProcessor vs contentProcessor */
+- return doContent(parser, parser->m_parentParser ? 1 : 0, parser->m_encoding,
+- s, end, nextPtr,
+- (XML_Bool)! parser->m_parsingStatus.finalBuffer,
+- XML_ACCOUNT_DIRECT);
++ result = doContent(parser, parser->m_parentParser ? 1 : 0,
++ parser->m_encoding, s, end, nextPtr,
++ (XML_Bool)! parser->m_parsingStatus.finalBuffer,
++ XML_ACCOUNT_DIRECT);
++ if (result == XML_ERROR_NONE) {
++ if (! storeRawNames(parser))
++ return XML_ERROR_NO_MEMORY;
++ }
++ return result;
+ }
+ }
+
diff --git a/meta/recipes-core/expat/expat/CVE-2022-43680.patch b/meta/recipes-core/expat/expat/CVE-2022-43680.patch
new file mode 100644
index 0000000000..6f93bc3ed7
--- /dev/null
+++ b/meta/recipes-core/expat/expat/CVE-2022-43680.patch
@@ -0,0 +1,33 @@
+From 5290462a7ea1278a8d5c0d5b2860d4e244f997e4 Mon Sep 17 00:00:00 2001
+From: Sebastian Pipping <sebastian@pipping.org>
+Date: Tue, 20 Sep 2022 02:44:34 +0200
+Subject: [PATCH] lib: Fix overeager DTD destruction in
+ XML_ExternalEntityParserCreate
+
+CVE: CVE-2022-43680
+Upstream-Status: Backport [https://github.com/libexpat/libexpat/commit/5290462a7ea1278a8d5c0d5b2860d4e244f997e4.patch]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comments: Hunk refreshed
+---
+ lib/xmlparse.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/lib/xmlparse.c b/lib/xmlparse.c
+index aacd6e7fc..57bf103cc 100644
+--- a/lib/xmlparse.c
++++ b/lib/xmlparse.c
+@@ -1035,6 +1035,14 @@ parserCreate(const XML_Char *encodingNam
+ parserInit(parser, encodingName);
+
+ if (encodingName && ! parser->m_protocolEncodingName) {
++ if (dtd) {
++ // We need to stop the upcoming call to XML_ParserFree from happily
++ // destroying parser->m_dtd because the DTD is shared with the parent
++ // parser and the only guard that keeps XML_ParserFree from destroying
++ // parser->m_dtd is parser->m_isParamEntity but it will be set to
++ // XML_TRUE only later in XML_ExternalEntityParserCreate (or not at all).
++ parser->m_dtd = NULL;
++ }
+ XML_ParserFree(parser);
+ return NULL;
+ }
diff --git a/meta/recipes-core/expat/expat_2.2.9.bb b/meta/recipes-core/expat/expat_2.2.9.bb
index f50e535922..8a5006e59a 100644
--- a/meta/recipes-core/expat/expat_2.2.9.bb
+++ b/meta/recipes-core/expat/expat_2.2.9.bb
@@ -20,6 +20,8 @@ SRC_URI = "git://github.com/libexpat/libexpat.git;protocol=https;branch=master \
file://CVE-2022-25314.patch \
file://CVE-2022-25315.patch \
file://libtool-tag.patch \
+ file://CVE-2022-40674.patch \
+ file://CVE-2022-43680.patch \
"
SRCREV = "a7bc26b69768f7fb24f0c7976fae24b157b85b13"
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch
new file mode 100644
index 0000000000..ce90586290
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-29499.patch
@@ -0,0 +1,290 @@
+From 5f4485c4ff57fdefb1661531788def7ca5a47328 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 04:19:44 +0000
+Subject: [PATCH] gvariant-serialiser: Check offset table entry size is minimal
+
+The entries in an offset table (which is used for variable sized arrays
+and tuples containing variable sized members) are sized so that they can
+address every byte in the overall variant.
+
+The specification requires that for a variant to be in normal form, its
+offset table entries must be the minimum width such that they can
+address every byte in the variant.
+
+That minimality requirement was not checked in
+`g_variant_is_normal_form()`, leading to two different byte arrays being
+interpreted as the normal form of a given variant tree. That kind of
+confusion could potentially be exploited, and is certainly a bug.
+
+Fix it by adding the necessary checks on offset table entry width, and
+unit tests.
+
+Spotted by William Manley.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2794
+
+CVE: CVE-2023-29499
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/5f4485c4ff57fdefb1661531788def7ca5a47328]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 19 +++-
+ glib/tests/gvariant.c | 176 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 194 insertions(+), 1 deletion(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 0bf7243..5aa2cbc 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -694,6 +694,10 @@ gvs_variable_sized_array_get_frame_offsets (GVariantSerialised value)
+ out.data_size = last_end;
+ out.array = value.data + last_end;
+ out.length = offsets_array_size / out.offset_size;
++
++ if (out.length > 0 && gvs_calculate_total_size (last_end, out.length) != value.size)
++ return out; /* offset size not minimal */
++
+ out.is_normal = TRUE;
+
+ return out;
+@@ -1201,6 +1205,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ gsize length;
+ gsize offset;
+ gsize i;
++ gsize offset_table_size;
+
+ /* as per the comment in gvs_tuple_get_child() */
+ if G_UNLIKELY (value.data == NULL && value.size != 0)
+@@ -1305,7 +1310,19 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ }
+ }
+
+- return offset_ptr == offset;
++ /* @offset_ptr has been counting backwards from the end of the variant, to
++ * find the beginning of the offset table. @offset has been counting forwards
++ * from the beginning of the variant to find the end of the data. They should
++ * have met in the middle. */
++ if (offset_ptr != offset)
++ return FALSE;
++
++ offset_table_size = value.size - offset_ptr;
++ if (value.size > 0 &&
++ gvs_calculate_total_size (offset, offset_table_size / offset_size) != value.size)
++ return FALSE; /* offset size not minimal */
++
++ return TRUE;
+ }
+
+ /* Variants {{{2
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index d640c81..4ce0e4f 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -5092,6 +5092,86 @@ test_normal_checking_array_offsets2 (void)
+ g_variant_unref (variant);
+ }
+
++/* Test that an otherwise-valid serialised GVariant is considered non-normal if
++ * its offset table entries are too wide.
++ *
++ * See §2.3.6 (Framing Offsets) of the GVariant specification. */
++static void
++test_normal_checking_array_offsets_minimal_sized (void)
++{
++ GVariantBuilder builder;
++ gsize i;
++ GVariant *aay_constructed = NULL;
++ const guint8 *data = NULL;
++ guint8 *data_owned = NULL;
++ GVariant *aay_deserialised = NULL;
++ GVariant *aay_normalised = NULL;
++
++ /* Construct an array of type aay, consisting of 128 elements which are each
++ * an empty array, i.e. `[[] * 128]`. This is chosen because the inner
++ * elements are variable sized (making the outer array variable sized, so it
++ * must have an offset table), but they are also zero-sized when serialised.
++ * So the serialised representation of @aay_constructed consists entirely of
++ * its offset table, which is entirely zeroes.
++ *
++ * The array is chosen to be 128 elements long because that means offset
++ * table entries which are 1 byte long. If the elements in the array were
++ * non-zero-sized (to the extent that the overall array is ≥256 bytes long),
++ * the offset table entries would end up being 2 bytes long. */
++ g_variant_builder_init (&builder, G_VARIANT_TYPE ("aay"));
++
++ for (i = 0; i < 128; i++)
++ g_variant_builder_add_value (&builder, g_variant_new_array (G_VARIANT_TYPE_BYTE, NULL, 0));
++
++ aay_constructed = g_variant_builder_end (&builder);
++
++ /* Verify that the constructed array is in normal form, and its serialised
++ * form is `b'\0' * 128`. */
++ g_assert_true (g_variant_is_normal_form (aay_constructed));
++ g_assert_cmpuint (g_variant_n_children (aay_constructed), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_constructed), ==, 128);
++
++ data = g_variant_get_data (aay_constructed);
++ for (i = 0; i < g_variant_get_size (aay_constructed); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Construct a serialised `aay` GVariant which is `b'\0' * 256`. This has to
++ * be a non-normal form of `[[] * 128]`, with 2-byte-long offset table
++ * entries, because each offset table entry has to be able to reference all of
++ * the byte boundaries in the container. All the entries in the offset table
++ * are zero, so all the elements of the array are zero-sized. */
++ data = data_owned = g_malloc0 (256);
++ aay_deserialised = g_variant_new_from_data (G_VARIANT_TYPE ("aay"),
++ data,
++ 256,
++ FALSE,
++ g_free,
++ g_steal_pointer (&data_owned));
++
++ g_assert_false (g_variant_is_normal_form (aay_deserialised));
++ g_assert_cmpuint (g_variant_n_children (aay_deserialised), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_deserialised), ==, 256);
++
++ data = g_variant_get_data (aay_deserialised);
++ for (i = 0; i < g_variant_get_size (aay_deserialised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Get its normal form. That should change the serialised size. */
++ aay_normalised = g_variant_get_normal_form (aay_deserialised);
++
++ g_assert_true (g_variant_is_normal_form (aay_normalised));
++ g_assert_cmpuint (g_variant_n_children (aay_normalised), ==, 128);
++ g_assert_cmpuint (g_variant_get_size (aay_normalised), ==, 128);
++
++ data = g_variant_get_data (aay_normalised);
++ for (i = 0; i < g_variant_get_size (aay_normalised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ g_variant_unref (aay_normalised);
++ g_variant_unref (aay_deserialised);
++ g_variant_unref (aay_constructed);
++}
++
+ /* Test that a tuple with invalidly large values in its offset table is
+ * normalised successfully without looping infinitely. */
+ static void
+@@ -5286,6 +5366,98 @@ test_normal_checking_tuple_offsets4 (void)
+ g_variant_unref (variant);
+ }
+
++/* Test that an otherwise-valid serialised GVariant is considered non-normal if
++ * its offset table entries are too wide.
++ *
++ * See §2.3.6 (Framing Offsets) of the GVariant specification. */
++static void
++test_normal_checking_tuple_offsets_minimal_sized (void)
++{
++ GString *type_string = NULL;
++ GVariantBuilder builder;
++ gsize i;
++ GVariant *ray_constructed = NULL;
++ const guint8 *data = NULL;
++ guint8 *data_owned = NULL;
++ GVariant *ray_deserialised = NULL;
++ GVariant *ray_normalised = NULL;
++
++ /* Construct a tuple of type (ay…ay), consisting of 129 members which are each
++ * an empty array, i.e. `([] * 129)`. This is chosen because the inner
++ * members are variable sized, so the outer tuple must have an offset table,
++ * but they are also zero-sized when serialised. So the serialised
++ * representation of @ray_constructed consists entirely of its offset table,
++ * which is entirely zeroes.
++ *
++ * The tuple is chosen to be 129 members long because that means it has 128
++ * offset table entries which are 1 byte long each. If the members in the
++ * tuple were non-zero-sized (to the extent that the overall tuple is ≥256
++ * bytes long), the offset table entries would end up being 2 bytes long.
++ *
++ * 129 members are used unlike 128 array elements in
++ * test_normal_checking_array_offsets_minimal_sized(), because the last member
++ * in a tuple never needs an offset table entry. */
++ type_string = g_string_new ("");
++ g_string_append_c (type_string, '(');
++ for (i = 0; i < 129; i++)
++ g_string_append (type_string, "ay");
++ g_string_append_c (type_string, ')');
++
++ g_variant_builder_init (&builder, G_VARIANT_TYPE (type_string->str));
++
++ for (i = 0; i < 129; i++)
++ g_variant_builder_add_value (&builder, g_variant_new_array (G_VARIANT_TYPE_BYTE, NULL, 0));
++
++ ray_constructed = g_variant_builder_end (&builder);
++
++ /* Verify that the constructed tuple is in normal form, and its serialised
++ * form is `b'\0' * 128`. */
++ g_assert_true (g_variant_is_normal_form (ray_constructed));
++ g_assert_cmpuint (g_variant_n_children (ray_constructed), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_constructed), ==, 128);
++
++ data = g_variant_get_data (ray_constructed);
++ for (i = 0; i < g_variant_get_size (ray_constructed); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Construct a serialised `(ay…ay)` GVariant which is `b'\0' * 256`. This has
++ * to be a non-normal form of `([] * 129)`, with 2-byte-long offset table
++ * entries, because each offset table entry has to be able to reference all of
++ * the byte boundaries in the container. All the entries in the offset table
++ * are zero, so all the members of the tuple are zero-sized. */
++ data = data_owned = g_malloc0 (256);
++ ray_deserialised = g_variant_new_from_data (G_VARIANT_TYPE (type_string->str),
++ data,
++ 256,
++ FALSE,
++ g_free,
++ g_steal_pointer (&data_owned));
++
++ g_assert_false (g_variant_is_normal_form (ray_deserialised));
++ g_assert_cmpuint (g_variant_n_children (ray_deserialised), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_deserialised), ==, 256);
++
++ data = g_variant_get_data (ray_deserialised);
++ for (i = 0; i < g_variant_get_size (ray_deserialised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ /* Get its normal form. That should change the serialised size. */
++ ray_normalised = g_variant_get_normal_form (ray_deserialised);
++
++ g_assert_true (g_variant_is_normal_form (ray_normalised));
++ g_assert_cmpuint (g_variant_n_children (ray_normalised), ==, 129);
++ g_assert_cmpuint (g_variant_get_size (ray_normalised), ==, 128);
++
++ data = g_variant_get_data (ray_normalised);
++ for (i = 0; i < g_variant_get_size (ray_normalised); i++)
++ g_assert_cmpuint (data[i], ==, 0);
++
++ g_variant_unref (ray_normalised);
++ g_variant_unref (ray_deserialised);
++ g_variant_unref (ray_constructed);
++ g_string_free (type_string, TRUE);
++}
++
+ /* Test that an empty object path is normalised successfully to the base object
+ * path, ‘/’. */
+ static void
+@@ -5431,6 +5603,8 @@ main (int argc, char **argv)
+ test_normal_checking_array_offsets);
+ g_test_add_func ("/gvariant/normal-checking/array-offsets2",
+ test_normal_checking_array_offsets2);
++ g_test_add_func ("/gvariant/normal-checking/array-offsets/minimal-sized",
++ test_normal_checking_array_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets2",
+@@ -5439,6 +5613,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuple_offsets3);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
+ test_normal_checking_tuple_offsets4);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets/minimal-sized",
++ test_normal_checking_tuple_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+ test_normal_checking_empty_object_path);
+
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch
new file mode 100644
index 0000000000..b2187f2af9
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0001.patch
@@ -0,0 +1,89 @@
+From 1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:04:49 +0000
+Subject: [PATCH] gvariant-core: Consolidate construction of
+ `GVariantSerialised`
+
+So I only need to change it in one place.
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant.c | 8 +++++---
+ glib/tests/gvariant.c | 24 ++++++++++++++++++++++++
+ 2 files changed, 29 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 8ba701e..4dbd9e8 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5952,14 +5952,16 @@ g_variant_byteswap (GVariant *value)
+ g_variant_serialised_byteswap (serialised);
+
+ bytes = g_bytes_new_take (serialised.data, serialised.size);
+- new = g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE);
++ new = g_variant_ref_sink (g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE));
+ g_bytes_unref (bytes);
+ }
+ else
+ /* contains no multi-byte data */
+- new = value;
++ new = g_variant_get_normal_form (value);
+
+- return g_variant_ref_sink (new);
++ g_assert (g_variant_is_trusted (new));
++
++ return g_steal_pointer (&new);
+ }
+
+ /**
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 4ce0e4f..3dda08e 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -3834,6 +3834,29 @@ test_gv_byteswap (void)
+ g_free (string);
+ }
+
++static void
++test_gv_byteswap_non_normal_non_aligned (void)
++{
++ const guint8 data[] = { 0x02 };
++ GVariant *v = NULL;
++ GVariant *v_byteswapped = NULL;
++
++ g_test_summary ("Test that calling g_variant_byteswap() on a variant which "
++ "is in non-normal form and doesn’t need byteswapping returns "
++ "the same variant in normal form.");
++
++ v = g_variant_new_from_data (G_VARIANT_TYPE_BOOLEAN, data, sizeof (data), FALSE, NULL, NULL);
++ g_assert_false (g_variant_is_normal_form (v));
++
++ v_byteswapped = g_variant_byteswap (v);
++ g_assert_true (g_variant_is_normal_form (v_byteswapped));
++
++ g_assert_cmpvariant (v, v_byteswapped);
++
++ g_variant_unref (v);
++ g_variant_unref (v_byteswapped);
++}
++
+ static void
+ test_parser (void)
+ {
+@@ -5570,6 +5593,7 @@ main (int argc, char **argv)
+ g_test_add_func ("/gvariant/builder-memory", test_builder_memory);
+ g_test_add_func ("/gvariant/hashing", test_hashing);
+ g_test_add_func ("/gvariant/byteswap", test_gv_byteswap);
++ g_test_add_func ("/gvariant/byteswap/non-normal-non-aligned", test_gv_byteswap_non_normal_non_aligned);
+ g_test_add_func ("/gvariant/parser", test_parses);
+ g_test_add_func ("/gvariant/parser/integer-bounds", test_parser_integer_bounds);
+ g_test_add_func ("/gvariant/parser/recursion", test_parser_recursion);
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch
new file mode 100644
index 0000000000..9167ea624f
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32611-0002.patch
@@ -0,0 +1,255 @@
+From 446e69f5edd72deb2196dee36bbaf8056caf6948 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:39:34 +0000
+Subject: [PATCH] gvariant-serialiser: Factor out functions for dealing with
+ framing offsets
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/446e69f5edd72deb2196dee36bbaf8056caf6948]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant.c | 81 +++++++++++++++++++++++++++++++++----------
+ glib/tests/gvariant.c | 57 ++++++++++++++++++++++++++----
+ 2 files changed, 112 insertions(+), 26 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index 4dbd9e8..a80c2c9 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5788,7 +5788,8 @@ g_variant_iter_loop (GVariantIter *iter,
+
+ /* Serialised data {{{1 */
+ static GVariant *
+-g_variant_deep_copy (GVariant *value)
++g_variant_deep_copy (GVariant *value,
++ gboolean byteswap)
+ {
+ switch (g_variant_classify (value))
+ {
+@@ -5806,7 +5807,7 @@ g_variant_deep_copy (GVariant *value)
+ for (i = 0, n_children = g_variant_n_children (value); i < n_children; i++)
+ {
+ GVariant *child = g_variant_get_child_value (value, i);
+- g_variant_builder_add_value (&builder, g_variant_deep_copy (child));
++ g_variant_builder_add_value (&builder, g_variant_deep_copy (child, byteswap));
+ g_variant_unref (child);
+ }
+
+@@ -5820,28 +5821,63 @@ g_variant_deep_copy (GVariant *value)
+ return g_variant_new_byte (g_variant_get_byte (value));
+
+ case G_VARIANT_CLASS_INT16:
+- return g_variant_new_int16 (g_variant_get_int16 (value));
++ if (byteswap)
++ return g_variant_new_int16 (GUINT16_SWAP_LE_BE (g_variant_get_int16 (value)));
++ else
++ return g_variant_new_int16 (g_variant_get_int16 (value));
+
+ case G_VARIANT_CLASS_UINT16:
+- return g_variant_new_uint16 (g_variant_get_uint16 (value));
++ if (byteswap)
++ return g_variant_new_uint16 (GUINT16_SWAP_LE_BE (g_variant_get_uint16 (value)));
++ else
++ return g_variant_new_uint16 (g_variant_get_uint16 (value));
+
+ case G_VARIANT_CLASS_INT32:
+- return g_variant_new_int32 (g_variant_get_int32 (value));
++ if (byteswap)
++ return g_variant_new_int32 (GUINT32_SWAP_LE_BE (g_variant_get_int32 (value)));
++ else
++ return g_variant_new_int32 (g_variant_get_int32 (value));
+
+ case G_VARIANT_CLASS_UINT32:
+- return g_variant_new_uint32 (g_variant_get_uint32 (value));
++ if (byteswap)
++ return g_variant_new_uint32 (GUINT32_SWAP_LE_BE (g_variant_get_uint32 (value)));
++ else
++ return g_variant_new_uint32 (g_variant_get_uint32 (value));
+
+ case G_VARIANT_CLASS_INT64:
+- return g_variant_new_int64 (g_variant_get_int64 (value));
++ if (byteswap)
++ return g_variant_new_int64 (GUINT64_SWAP_LE_BE (g_variant_get_int64 (value)));
++ else
++ return g_variant_new_int64 (g_variant_get_int64 (value));
+
+ case G_VARIANT_CLASS_UINT64:
+- return g_variant_new_uint64 (g_variant_get_uint64 (value));
++ if (byteswap)
++ return g_variant_new_uint64 (GUINT64_SWAP_LE_BE (g_variant_get_uint64 (value)));
++ else
++ return g_variant_new_uint64 (g_variant_get_uint64 (value));
+
+ case G_VARIANT_CLASS_HANDLE:
+- return g_variant_new_handle (g_variant_get_handle (value));
++ if (byteswap)
++ return g_variant_new_handle (GUINT32_SWAP_LE_BE (g_variant_get_handle (value)));
++ else
++ return g_variant_new_handle (g_variant_get_handle (value));
+
+ case G_VARIANT_CLASS_DOUBLE:
+- return g_variant_new_double (g_variant_get_double (value));
++ if (byteswap)
++ {
++ /* We have to convert the double to a uint64 here using a union,
++ * because a cast will round it numerically. */
++ union
++ {
++ guint64 u64;
++ gdouble dbl;
++ } u1, u2;
++ u1.dbl = g_variant_get_double (value);
++ u2.u64 = GUINT64_SWAP_LE_BE (u1.u64);
++ return g_variant_new_double (u2.dbl);
++ }
++ else
++ return g_variant_new_double (g_variant_get_double (value));
+
+ case G_VARIANT_CLASS_STRING:
+ return g_variant_new_string (g_variant_get_string (value, NULL));
+@@ -5896,7 +5932,7 @@ g_variant_get_normal_form (GVariant *value)
+ if (g_variant_is_normal_form (value))
+ return g_variant_ref (value);
+
+- trusted = g_variant_deep_copy (value);
++ trusted = g_variant_deep_copy (value, FALSE);
+ g_assert (g_variant_is_trusted (trusted));
+
+ return g_variant_ref_sink (trusted);
+@@ -5916,6 +5952,11 @@ g_variant_get_normal_form (GVariant *value)
+ * contain multi-byte numeric data. That include strings, booleans,
+ * bytes and containers containing only these things (recursively).
+ *
++ * While this function can safely handle untrusted, non-normal data, it is
++ * recommended to check whether the input is in normal form beforehand, using
++ * g_variant_is_normal_form(), and to reject non-normal inputs if your
++ * application can be strict about what inputs it rejects.
++ *
+ * The returned value is always in normal form and is marked as trusted.
+ *
+ * Returns: (transfer full): the byteswapped form of @value
+@@ -5933,21 +5974,20 @@ g_variant_byteswap (GVariant *value)
+
+ g_variant_type_info_query (type_info, &alignment, NULL);
+
+- if (alignment)
+- /* (potentially) contains multi-byte numeric data */
++ if (alignment && g_variant_is_normal_form (value))
+ {
++ /* (potentially) contains multi-byte numeric data, but is also already in
++ * normal form so we can use a faster byteswapping codepath on the
++ * serialised data */
+ GVariantSerialised serialised = { 0, };
+- GVariant *trusted;
+ GBytes *bytes;
+
+- trusted = g_variant_get_normal_form (value);
+- serialised.type_info = g_variant_get_type_info (trusted);
+- serialised.size = g_variant_get_size (trusted);
++ serialised.type_info = g_variant_get_type_info (value);
++ serialised.size = g_variant_get_size (value);
+ serialised.data = g_malloc (serialised.size);
+ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
+ serialised.checked_offsets_up_to = G_MAXSIZE;
+- g_variant_store (trusted, serialised.data);
+- g_variant_unref (trusted);
++ g_variant_store (value, serialised.data);
+
+ g_variant_serialised_byteswap (serialised);
+
+@@ -5955,6 +5995,9 @@ g_variant_byteswap (GVariant *value)
+ new = g_variant_ref_sink (g_variant_new_from_bytes (g_variant_get_type (value), bytes, TRUE));
+ g_bytes_unref (bytes);
+ }
++ else if (alignment)
++ /* (potentially) contains multi-byte numeric data */
++ new = g_variant_ref_sink (g_variant_deep_copy (value, TRUE));
+ else
+ /* contains no multi-byte data */
+ new = g_variant_get_normal_form (value);
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 3dda08e..679dd40 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -2284,24 +2284,67 @@ serialise_tree (TreeInstance *tree,
+ static void
+ test_byteswap (void)
+ {
+- GVariantSerialised one = { 0, }, two = { 0, };
++ GVariantSerialised one = { 0, }, two = { 0, }, three = { 0, };
+ TreeInstance *tree;
+-
++ GVariant *one_variant = NULL;
++ GVariant *two_variant = NULL;
++ GVariant *two_byteswapped = NULL;
++ GVariant *three_variant = NULL;
++ GVariant *three_byteswapped = NULL;
++ guint8 *three_data_copy = NULL;
++ gsize three_size_copy = 0;
++
++ /* Write a tree out twice, once normally and once byteswapped. */
+ tree = tree_instance_new (NULL, 3);
+ serialise_tree (tree, &one);
+
++ one_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (one.type_info)),
++ one.data, one.size, FALSE, NULL, NULL);
++
+ i_am_writing_byteswapped = TRUE;
+ serialise_tree (tree, &two);
++ serialise_tree (tree, &three);
+ i_am_writing_byteswapped = FALSE;
+
+- g_variant_serialised_byteswap (two);
+-
+- g_assert_cmpmem (one.data, one.size, two.data, two.size);
+- g_assert_cmpuint (one.depth, ==, two.depth);
+-
++ /* Swap the first byteswapped one back using the function we want to test. */
++ two_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (two.type_info)),
++ two.data, two.size, FALSE, NULL, NULL);
++ two_byteswapped = g_variant_byteswap (two_variant);
++
++ /* Make the second byteswapped one non-normal (hopefully), and then byteswap
++ * it back using the function we want to test in its non-normal mode.
++ * This might not work because it’s not necessarily possible to make an
++ * arbitrary random variant non-normal. Adding a single zero byte to the end
++ * often makes something non-normal but still readable. */
++ three_size_copy = three.size + 1;
++ three_data_copy = g_malloc (three_size_copy);
++ memcpy (three_data_copy, three.data, three.size);
++ three_data_copy[three.size] = '\0';
++
++ three_variant = g_variant_new_from_data (G_VARIANT_TYPE (g_variant_type_info_get_type_string (three.type_info)),
++ three_data_copy, three_size_copy, FALSE, NULL, NULL);
++ three_byteswapped = g_variant_byteswap (three_variant);
++
++ /* Check they’re the same. We can always compare @one_variant and
++ * @two_byteswapped. We can only compare @two_byteswapped and
++ * @three_byteswapped if @two_variant and @three_variant are equal: in that
++ * case, the corruption to @three_variant was enough to make it non-normal but
++ * not enough to change its value. */
++ g_assert_cmpvariant (one_variant, two_byteswapped);
++
++ if (g_variant_equal (two_variant, three_variant))
++ g_assert_cmpvariant (two_byteswapped, three_byteswapped);
++
++ g_variant_unref (three_byteswapped);
++ g_variant_unref (three_variant);
++ g_variant_unref (two_byteswapped);
++ g_variant_unref (two_variant);
++ g_variant_unref (one_variant);
+ tree_instance_free (tree);
+ g_free (one.data);
+ g_free (two.data);
++ g_free (three.data);
++ g_free (three_data_copy);
+ }
+
+ static void
+--
+2.24.4
+
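The patch above avoids corrupting floating-point values by reinterpreting the bits of a double through a union before swapping, since a numeric cast would convert the value rather than its representation. A minimal standalone sketch of that trick, assuming plain C with no GLib dependency (bswap64 and the sample value are illustrative only, not part of the backport):

#include <stdint.h>
#include <stdio.h>

/* Byte-swap a 64-bit value; portable, no compiler builtins assumed. */
static uint64_t
bswap64 (uint64_t v)
{
  return ((v & 0x00000000000000ffULL) << 56) |
         ((v & 0x000000000000ff00ULL) << 40) |
         ((v & 0x0000000000ff0000ULL) << 24) |
         ((v & 0x00000000ff000000ULL) <<  8) |
         ((v & 0x000000ff00000000ULL) >>  8) |
         ((v & 0x0000ff0000000000ULL) >> 24) |
         ((v & 0x00ff000000000000ULL) >> 40) |
         ((v & 0xff00000000000000ULL) >> 56);
}

int
main (void)
{
  union { uint64_t u64; double dbl; } u1, u2;

  /* Reinterpret the bits through the union; a (uint64_t) cast of the double
   * would convert its numeric value instead of preserving the bit pattern. */
  u1.dbl = 3.141592653589793;
  u2.u64 = bswap64 (u1.u64);

  /* Swapping twice restores the original bit pattern exactly. */
  u2.u64 = bswap64 (u2.u64);
  printf ("round-trip: %.17g\n", u2.dbl);
  return 0;
}
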
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch
new file mode 100644
index 0000000000..533142b22a
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32636.patch
@@ -0,0 +1,49 @@
+From 21a204147b16539b3eda3143b32844c49e29f4d4 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 11:33:49 +0000
+Subject: [PATCH] gvariant: Propagate trust when getting a child of a
+ serialised variant
+
+If a variant is trusted, that means all its children are trusted, so
+ensure that their checked offsets are set as such.
+
+This allows a lot of the offset table checks to be avoided when getting
+children from trusted serialised tuples, which speeds things up.
+
+No unit test is included because this is just a performance fix. If
+there are other slownesses, or regressions, in serialised `GVariant`
+performance, the fuzzing setup will catch them like it did this one.
+
+This change does reduce the time to run the oss-fuzz reproducer from 80s
+to about 0.7s on my machine.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2841
+oss-fuzz#54314
+
+CVE: CVE-2023-32636
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/21a204147b16539b3eda3143b32844c49e29f4d4]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 1b9d5cc..ed57c70 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -1173,8 +1173,8 @@ g_variant_get_child_value (GVariant *value,
+ child->contents.serialised.bytes =
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
+- child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
+- child->contents.serialised.checked_offsets_up_to = s_child.checked_offsets_up_to;
++ child->contents.serialised.ordered_offsets_up_to = (value->state & STATE_TRUSTED) ? G_MAXSIZE : s_child.ordered_offsets_up_to;
++ child->contents.serialised.checked_offsets_up_to = (value->state & STATE_TRUSTED) ? G_MAXSIZE : s_child.checked_offsets_up_to;
+
+ return child;
+ }
+--
+2.24.4
+
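From the application side, the trust referred to above comes from the `trusted` argument of g_variant_new_from_data(). A short usage sketch under stated assumptions (the ["hi"] payload and its byte layout are illustrative; only pass TRUE for bytes you serialised yourself):

#include <glib.h>

int
main (void)
{
  /* Normal-form serialisation of the "as" value ["hi"]: the string bytes
   * "hi\0" followed by a one-byte framing offset (3). */
  const guint8 data[] = { 'h', 'i', '\0', 0x03 };
  GVariant *value, *child;

  /* trusted=TRUE (4th argument) asserts the bytes are already in normal form;
   * with the patch above, that trust now reaches the children too, so their
   * offset-table checks can be skipped. */
  value = g_variant_ref_sink (g_variant_new_from_data (G_VARIANT_TYPE ("as"),
                                                       data, sizeof data,
                                                       TRUE, NULL, NULL));

  child = g_variant_get_child_value (value, 0);
  g_print ("%s\n", g_variant_get_string (child, NULL));   /* prints "hi" */

  g_variant_unref (child);
  g_variant_unref (value);
  return 0;
}
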
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch
new file mode 100644
index 0000000000..9c0867bf5f
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32643.patch
@@ -0,0 +1,154 @@
+From 78da5faccb3e065116b75b3ff87ff55381da6c76 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 11:24:43 +0000
+Subject: [PATCH] gvariant: Check offset table doesn't fall outside variant
+ bounds
+
+When dereferencing the first entry in the offset table for a tuple,
+check that it doesn’t fall outside the bounds of the variant first.
+
+This prevents an out-of-bounds read from some non-normal tuples.
+
+This bug was introduced in commit 73d0aa81c2575a5c9ae77d.
+
+Includes a unit test, although the test will likely only catch the
+original bug if run with asan enabled.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2840
+oss-fuzz#54302
+
+CVE: CVE-2023-32643
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/78da5faccb3e065116b75b3ff87ff55381da6c76]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 12 ++++++--
+ glib/tests/gvariant.c | 63 ++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 72 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 5aa2cbc..4e50ed7 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -979,7 +979,8 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+
+ member_info = g_variant_type_info_member_info (value.type_info, index_);
+
+- if (member_info->i + 1)
++ if (member_info->i + 1 &&
++ offset_size * (member_info->i + 1) <= value.size)
+ member_start = gvs_read_unaligned_le (value.data + value.size -
+ offset_size * (member_info->i + 1),
+ offset_size);
+@@ -990,7 +991,8 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ member_start &= member_info->b;
+ member_start |= member_info->c;
+
+- if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST &&
++ offset_size * (member_info->i + 1) <= value.size)
+ member_end = value.size - offset_size * (member_info->i + 1);
+
+ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
+@@ -1001,11 +1003,15 @@ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ member_end = member_start + fixed_size;
+ }
+
+- else /* G_VARIANT_MEMBER_ENDING_OFFSET */
++ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_OFFSET &&
++ offset_size * (member_info->i + 2) <= value.size)
+ member_end = gvs_read_unaligned_le (value.data + value.size -
+ offset_size * (member_info->i + 2),
+ offset_size);
+
++ else /* invalid */
++ member_end = G_MAXSIZE;
++
+ if (out_member_start != NULL)
+ *out_member_start = member_start;
+ if (out_member_end != NULL)
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 679dd40..2eca8be 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -5432,6 +5432,67 @@ test_normal_checking_tuple_offsets4 (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that dereferencing the first element in the offset
++ * table doesn’t dereference memory before the start of the GVariant. The first
++ * element in the offset table gives the offset of the final member in the
++ * tuple (the offset table is stored in reverse), and the position of this final
++ * member is needed to check that none of the tuple members overlap with the
++ * offset table
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2840 */
++static void
++test_normal_checking_tuple_offsets5 (void)
++{
++ /* A tuple of type (sss) in normal form would have an offset table with two
++ * entries:
++ * - The first entry (lowest index in the table) gives the offset of the
++ * third `s` in the tuple, as the offset table is reversed compared to the
++ * tuple members.
++ * - The second entry (highest index in the table) gives the offset of the
++ * second `s` in the tuple.
++ * - The offset of the first `s` in the tuple is always 0.
++ *
++ * See §2.5.4 (Structures) of the GVariant specification for details, noting
++ * that the table is only laid out this way because all three members of the
++ * tuple have non-fixed sizes.
++ *
++ * It’s not clear whether the 0xaa data of this variant is part of the strings
++ * in the tuple, or part of the offset table. It doesn’t really matter. This
++ * is a regression test to check that the code to validate the offset table
++ * doesn’t unconditionally try to access the first entry in the offset table
++ * by subtracting the table size from the end of the GVariant data.
++ *
++ * In this non-normal case, that would result in an address off the start of
++ * the GVariant data, and an out-of-bounds read, because the GVariant is one
++ * byte long, but the offset table is calculated as two bytes long (with 1B
++ * sized entries) from the tuple’s type.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(sss)");
++ const guint8 data[] = { 0xaa };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ g_test_bug ("https://gitlab.gnome.org/GNOME/glib/-/issues/2840");
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++
++ expected = g_variant_new_parsed ("('', '', '')");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that an otherwise-valid serialised GVariant is considered non-normal if
+ * its offset table entries are too wide.
+ *
+@@ -5680,6 +5741,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuple_offsets3);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
+ test_normal_checking_tuple_offsets4);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets5",
++ test_normal_checking_tuple_offsets5);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets/minimal-sized",
+ test_normal_checking_tuple_offsets_minimal_sized);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+--
+2.24.4
+
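The core of the fix above is refusing to dereference an offset-table entry whose position would land before the start of the serialised data. A standalone sketch of that bounds check, assuming little-endian table entries counted back from the end of the buffer (read_offset_checked is a hypothetical helper, not GLib API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define OFFSET_INVALID SIZE_MAX

/* Read table entry `k` (counted back from the end of `data`), or return
 * OFFSET_INVALID when the entry would start before the buffer does. */
static size_t
read_offset_checked (const uint8_t *data, size_t size,
                     size_t offset_size, size_t k)
{
  size_t value = 0;
  size_t i;

  if (offset_size * k > size)     /* entry falls outside the serialised data */
    return OFFSET_INVALID;

  for (i = 0; i < offset_size; i++)
    value |= (size_t) data[size - offset_size * k + i] << (8 * i);

  return value;
}

int
main (void)
{
  /* One byte of data, but a (sss) tuple would need a two-entry table. */
  const uint8_t tiny[] = { 0xaa };

  size_t off = read_offset_checked (tiny, sizeof tiny, 1, 2);
  printf ("%s\n", off == OFFSET_INVALID ? "rejected" : "read");
  return 0;
}
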
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch
new file mode 100644
index 0000000000..9fc58341cb
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0001.patch
@@ -0,0 +1,103 @@
+From 1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:04:49 +0000
+Subject: [PATCH] gvariant-core: Consolidate construction of
+ `GVariantSerialised`
+
+So I only need to change it in one place.
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/1deacdd4e8e35a5cf1417918ca4f6b0afa6409b1]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 49 ++++++++++++++++++++++----------------------
+ 1 file changed, 25 insertions(+), 24 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 9397573..aa0e0a0 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -349,6 +349,27 @@ g_variant_ensure_size (GVariant *value)
+ }
+ }
+
++/* < private >
++ * g_variant_to_serialised:
++ * @value: a #GVariant
++ *
++ * Gets a GVariantSerialised for a GVariant in state STATE_SERIALISED.
++ */
++inline static GVariantSerialised
++g_variant_to_serialised (GVariant *value)
++{
++ g_assert (value->state & STATE_SERIALISED);
++ {
++ GVariantSerialised serialised = {
++ value->type_info,
++ (gpointer) value->contents.serialised.data,
++ value->size,
++ value->depth,
++ };
++ return serialised;
++ }
++}
++
+ /* < private >
+ * g_variant_serialise:
+ * @value: a #GVariant
+@@ -991,16 +1012,8 @@ g_variant_n_children (GVariant *value)
+ g_variant_lock (value);
+
+ if (value->state & STATE_SERIALISED)
+- {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth,
+- };
+-
+- n_children = g_variant_serialised_n_children (serialised);
+- }
++ n_children = g_variant_serialised_n_children (
++ g_variant_to_serialised (value));
+ else
+ n_children = value->contents.tree.n_children;
+
+@@ -1061,12 +1074,7 @@ g_variant_get_child_value (GVariant *value,
+ }
+
+ {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth,
+- };
++ GVariantSerialised serialised = g_variant_to_serialised (value);
+ GVariantSerialised s_child;
+ GVariant *child;
+
+@@ -1179,14 +1187,7 @@ g_variant_is_normal_form (GVariant *value)
+
+ if (value->state & STATE_SERIALISED)
+ {
+- GVariantSerialised serialised = {
+- value->type_info,
+- (gpointer) value->contents.serialised.data,
+- value->size,
+- value->depth
+- };
+-
+- if (g_variant_serialised_is_normal (serialised))
++ if (g_variant_serialised_is_normal (g_variant_to_serialised (value)))
+ value->state |= STATE_TRUSTED;
+ }
+ else
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch
new file mode 100644
index 0000000000..0e96b8d457
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0002.patch
@@ -0,0 +1,210 @@
+From 446e69f5edd72deb2196dee36bbaf8056caf6948 Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 9 Aug 2023 10:39:34 +0000
+Subject: [PATCH] gvariant-serialiser: Factor out functions for dealing with
+ framing offsets
+
+This introduces no functional changes.
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/446e69f5edd72deb2196dee36bbaf8056caf6948]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 108 +++++++++++++++++++------------------
+ 1 file changed, 57 insertions(+), 51 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 83e9d85..c7c2114 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -633,30 +633,62 @@ gvs_calculate_total_size (gsize body_size,
+ return body_size + 8 * offsets;
+ }
+
++struct Offsets
++{
++ gsize data_size;
++
++ guchar *array;
++ gsize length;
++ guint offset_size;
++
++ gboolean is_normal;
++};
++
+ static gsize
+-gvs_variable_sized_array_n_children (GVariantSerialised value)
++gvs_offsets_get_offset_n (struct Offsets *offsets,
++ gsize n)
++{
++ return gvs_read_unaligned_le (
++ offsets->array + (offsets->offset_size * n), offsets->offset_size);
++}
++
++static struct Offsets
++gvs_variable_sized_array_get_frame_offsets (GVariantSerialised value)
+ {
++ struct Offsets out = { 0, };
+ gsize offsets_array_size;
+- gsize offset_size;
+ gsize last_end;
+
+ if (value.size == 0)
+- return 0;
+-
+- offset_size = gvs_get_offset_size (value.size);
++ {
++ out.is_normal = TRUE;
++ return out;
++ }
+
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
++ out.offset_size = gvs_get_offset_size (value.size);
++ last_end = gvs_read_unaligned_le (value.data + value.size - out.offset_size,
++ out.offset_size);
+
+ if (last_end > value.size)
+- return 0;
++ return out; /* offsets not normal */
+
+ offsets_array_size = value.size - last_end;
+
+- if (offsets_array_size % offset_size)
+- return 0;
++ if (offsets_array_size % out.offset_size)
++ return out; /* offsets not normal */
++
++ out.data_size = last_end;
++ out.array = value.data + last_end;
++ out.length = offsets_array_size / out.offset_size;
++ out.is_normal = TRUE;
+
+- return offsets_array_size / offset_size;
++ return out;
++}
++
++static gsize
++gvs_variable_sized_array_n_children (GVariantSerialised value)
++{
++ return gvs_variable_sized_array_get_frame_offsets (value).length;
+ }
+
+ static GVariantSerialised
+@@ -664,8 +696,9 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ gsize index_)
+ {
+ GVariantSerialised child = { 0, };
+- gsize offset_size;
+- gsize last_end;
++
++ struct Offsets offsets = gvs_variable_sized_array_get_frame_offsets (value);
++
+ gsize start;
+ gsize end;
+
+@@ -673,18 +706,11 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (child.type_info);
+ child.depth = value.depth + 1;
+
+- offset_size = gvs_get_offset_size (value.size);
+-
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
+-
+ if (index_ > 0)
+ {
+ guint alignment;
+
+- start = gvs_read_unaligned_le (value.data + last_end +
+- (offset_size * (index_ - 1)),
+- offset_size);
++ start = gvs_offsets_get_offset_n (&offsets, index_ - 1);
+
+ g_variant_type_info_query (child.type_info, &alignment, NULL);
+ start += (-start) & alignment;
+@@ -692,11 +718,9 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ else
+ start = 0;
+
+- end = gvs_read_unaligned_le (value.data + last_end +
+- (offset_size * index_),
+- offset_size);
++ end = gvs_offsets_get_offset_n (&offsets, index_);
+
+- if (start < end && end <= value.size && end <= last_end)
++ if (start < end && end <= value.size && end <= offsets.data_size)
+ {
+ child.data = value.data + start;
+ child.size = end - start;
+@@ -768,34 +792,16 @@ static gboolean
+ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ {
+ GVariantSerialised child = { 0, };
+- gsize offsets_array_size;
+- guchar *offsets_array;
+- guint offset_size;
+ guint alignment;
+- gsize last_end;
+- gsize length;
+ gsize offset;
+ gsize i;
+
+- if (value.size == 0)
+- return TRUE;
+-
+- offset_size = gvs_get_offset_size (value.size);
+- last_end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size, offset_size);
++ struct Offsets offsets = gvs_variable_sized_array_get_frame_offsets (value);
+
+- if (last_end > value.size)
++ if (!offsets.is_normal)
+ return FALSE;
+
+- offsets_array_size = value.size - last_end;
+-
+- if (offsets_array_size % offset_size)
+- return FALSE;
+-
+- offsets_array = value.data + value.size - offsets_array_size;
+- length = offsets_array_size / offset_size;
+-
+- if (length == 0)
++ if (value.size != 0 && offsets.length == 0)
+ return FALSE;
+
+ child.type_info = g_variant_type_info_element (value.type_info);
+@@ -803,14 +809,14 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ child.depth = value.depth + 1;
+ offset = 0;
+
+- for (i = 0; i < length; i++)
++ for (i = 0; i < offsets.length; i++)
+ {
+ gsize this_end;
+
+- this_end = gvs_read_unaligned_le (offsets_array + offset_size * i,
+- offset_size);
++ this_end = gvs_read_unaligned_le (offsets.array + offsets.offset_size * i,
++ offsets.offset_size);
+
+- if (this_end < offset || this_end > last_end)
++ if (this_end < offset || this_end > offsets.data_size)
+ return FALSE;
+
+ while (offset & alignment)
+@@ -832,7 +838,7 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ offset = this_end;
+ }
+
+- g_assert (offset == last_end);
++ g_assert (offset == offsets.data_size);
+
+ return TRUE;
+ }
+--
+2.24.4
+
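The new struct Offsets above bundles the sanity checks on an array's framing-offset table: the last entry must stay inside the buffer, and the remaining bytes must divide evenly into entries. A simplified standalone analogue, fixed to one-byte entries so it stays self-contained (frame_offsets and parse_frame_offsets are illustrative names, not GLib API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct frame_offsets
{
  size_t data_size;   /* bytes occupied by the element data */
  size_t length;      /* number of offset-table entries */
  int    is_normal;   /* 0 when the table is malformed */
};

static struct frame_offsets
parse_frame_offsets (const uint8_t *data, size_t size)
{
  struct frame_offsets out = { 0, 0, 0 };
  size_t last_end;

  if (size == 0)
    {
      out.is_normal = 1;          /* empty array is trivially normal */
      return out;
    }

  last_end = data[size - 1];      /* final entry = end of the element data */
  if (last_end > size)
    return out;                   /* table points past the buffer */

  /* With one-byte entries the remainder is the table; real code also checks
   * that the remainder divides evenly by the entry width. */
  out.data_size = last_end;
  out.length = size - last_end;
  out.is_normal = 1;
  return out;
}

int
main (void)
{
  const uint8_t good[] = { 'h', 'i', '\0', 0x03 };  /* ["hi"] as an "as" array */
  const uint8_t bad[]  = { 0x7f };                  /* offset beyond the buffer */
  struct frame_offsets g = parse_frame_offsets (good, sizeof good);
  struct frame_offsets b = parse_frame_offsets (bad, sizeof bad);

  printf ("good: normal=%d entries=%zu\n", g.is_normal, g.length);
  printf ("bad:  normal=%d\n", b.is_normal);
  return 0;
}
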
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch
new file mode 100644
index 0000000000..e361cc7aad
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0003.patch
@@ -0,0 +1,417 @@
+From ade71fb544391b2e33e1859645726bfee0d5eaaf Mon Sep 17 00:00:00 2001
+From: William Manley <will@stb-tester.com>
+Date: Wed, 16 Aug 2023 03:12:21 +0000
+Subject: [PATCH] gvariant: Don't allow child elements to overlap with each
+ other
+
+If different elements of a variable sized array can overlap with each
+other then we can cause a `GVariant` to normalise to a much larger type.
+
+This commit changes the behaviour of `GVariant` with non-normal form data. If
+an invalid frame offset is found all subsequent elements are given their
+default value.
+
+When retrieving an element at index `n` we scan the frame offsets up to index
+`n` and if they are not in order we return an element with the default value
+for that type. This guarantees that elements don't overlap with each
+other. We remember the offset we've scanned up to so we don't need to
+repeat this work on subsequent accesses. We skip these checks for trusted
+data.
+
+Unfortunately this makes random access of untrusted data O(n) — at least
+on first access. It doesn't affect the algorithmic complexity of accessing
+elements in order, such as when using the `GVariantIter` interface. Also:
+the cost of validation will be amortised as the `GVariant` instance
+continues to be used.
+
+I've implemented this with 4 different functions, 1 for each element size,
+rather than looping calling `gvs_read_unaligned_le` in the hope that the
+compiler will find it easy to optimise and should produce fairly tight
+code.
+
+Fixes: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/ade71fb544391b2e33e1859645726bfee0d5eaaf]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 35 ++++++++++++++++
+ glib/gvariant-serialiser.c | 86 ++++++++++++++++++++++++++++++++++++--
+ glib/gvariant-serialiser.h | 8 ++++
+ glib/tests/gvariant.c | 45 ++++++++++++++++++++
+ 4 files changed, 171 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index aa0e0a0..9b51e15 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -65,6 +65,7 @@ struct _GVariant
+ {
+ GBytes *bytes;
+ gconstpointer data;
++ gsize ordered_offsets_up_to;
+ } serialised;
+
+ struct
+@@ -162,6 +163,24 @@ struct _GVariant
+ * if .data pointed to the appropriate number of nul
+ * bytes.
+ *
++ * .ordered_offsets_up_to: If ordered_offsets_up_to == n this means that all
++ * the frame offsets up to and including the frame
++ * offset determining the end of element n are in
++ * order. This guarantees that the bytes of element
++ * n don't overlap with any previous element.
++ *
++ * For trusted data this is set to G_MAXSIZE and we
++ * don't check that the frame offsets are in order.
++ *
++ * Note: This doesn't imply the offsets are good in
++ * any way apart from their ordering. In particular
++ * offsets may be out of bounds for this value or
++ * may imply that the data overlaps the frame
++ * offsets themselves.
++ *
++ * This field is only relevant for arrays of non
++ * fixed width types.
++ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+ * Note that accesses from other threads could result in
+@@ -365,6 +384,7 @@ g_variant_to_serialised (GVariant *value)
+ (gpointer) value->contents.serialised.data,
+ value->size,
+ value->depth,
++ value->contents.serialised.ordered_offsets_up_to,
+ };
+ return serialised;
+ }
+@@ -396,6 +416,7 @@ g_variant_serialise (GVariant *value,
+ serialised.size = value->size;
+ serialised.data = data;
+ serialised.depth = value->depth;
++ serialised.ordered_offsets_up_to = 0;
+
+ children = (gpointer *) value->contents.tree.children;
+ n_children = value->contents.tree.n_children;
+@@ -439,6 +460,15 @@ g_variant_fill_gvs (GVariantSerialised *serialised,
+ g_assert (serialised->size == value->size);
+ serialised->depth = value->depth;
+
++ if (value->state & STATE_SERIALISED)
++ {
++ serialised->ordered_offsets_up_to = value->contents.serialised.ordered_offsets_up_to;
++ }
++ else
++ {
++ serialised->ordered_offsets_up_to = 0;
++ }
++
+ if (serialised->data)
+ /* g_variant_store() is a public API, so it
+ * it will reacquire the lock if it needs to.
+@@ -481,6 +511,7 @@ g_variant_ensure_serialised (GVariant *value)
+ bytes = g_bytes_new_take (data, value->size);
+ value->contents.serialised.data = g_bytes_get_data (bytes, NULL);
+ value->contents.serialised.bytes = bytes;
++ value->contents.serialised.ordered_offsets_up_to = G_MAXSIZE;
+ value->state |= STATE_SERIALISED;
+ }
+ }
+@@ -561,6 +592,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ serialised.type_info = value->type_info;
+ serialised.data = (guchar *) g_bytes_get_data (bytes, &serialised.size);
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ if (!g_variant_serialised_check (serialised))
+ {
+@@ -610,6 +642,8 @@ g_variant_new_from_bytes (const GVariantType *type,
+ value->contents.serialised.data = g_bytes_get_data (bytes, &value->size);
+ }
+
++ value->contents.serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++
+ g_clear_pointer (&owned_bytes, g_bytes_unref);
+
+ return value;
+@@ -1108,6 +1142,7 @@ g_variant_get_child_value (GVariant *value,
+ child->contents.serialised.bytes =
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
++ child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
+
+ return child;
+ }
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index c7c2114..fe0b1a4 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2007, 2008 Ryan Lortie
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2020 William Manley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -264,6 +265,7 @@ gvs_fixed_sized_maybe_get_child (GVariantSerialised value,
+ value.type_info = g_variant_type_info_element (value.type_info);
+ g_variant_type_info_ref (value.type_info);
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -295,7 +297,7 @@ gvs_fixed_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1 };
++ GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0 };
+
+ gvs_filler (&child, children[0]);
+ }
+@@ -317,6 +319,7 @@ gvs_fixed_sized_maybe_is_normal (GVariantSerialised value)
+ /* proper element size: "Just". recurse to the child. */
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -358,6 +361,7 @@ gvs_variable_sized_maybe_get_child (GVariantSerialised value,
+ value.data = NULL;
+
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -388,7 +392,7 @@ gvs_variable_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1 };
++ GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0 };
+
+ /* write the data for the child. */
+ gvs_filler (&child, children[0]);
+@@ -408,6 +412,7 @@ gvs_variable_sized_maybe_is_normal (GVariantSerialised value)
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.size--;
+ value.depth++;
++ value.ordered_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -691,6 +696,32 @@ gvs_variable_sized_array_n_children (GVariantSerialised value)
+ return gvs_variable_sized_array_get_frame_offsets (value).length;
+ }
+
++/* Find the index of the first out-of-order element in @data, assuming that
++ * @data is an array of elements of given @type, starting at index @start and
++ * containing a further @len-@start elements. */
++#define DEFINE_FIND_UNORDERED(type) \
++ static gsize \
++ find_unordered_##type (const guint8 *data, gsize start, gsize len) \
++ { \
++ gsize off; \
++ type current, previous; \
++ \
++ memcpy (&previous, data + start * sizeof (current), sizeof (current)); \
++ for (off = (start + 1) * sizeof (current); off < len * sizeof (current); off += sizeof (current)) \
++ { \
++ memcpy (&current, data + off, sizeof (current)); \
++ if (current < previous) \
++ break; \
++ previous = current; \
++ } \
++ return off / sizeof (current) - 1; \
++ }
++
++DEFINE_FIND_UNORDERED (guint8);
++DEFINE_FIND_UNORDERED (guint16);
++DEFINE_FIND_UNORDERED (guint32);
++DEFINE_FIND_UNORDERED (guint64);
++
+ static GVariantSerialised
+ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ gsize index_)
+@@ -706,6 +737,49 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (child.type_info);
+ child.depth = value.depth + 1;
+
++ /* If the requested @index_ is beyond the set of indices whose framing offsets
++ * have been checked, check the remaining offsets to see whether they’re
++ * normal (in order, no overlapping array elements). */
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ switch (offsets.offset_size)
++ {
++ case 1:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint8 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 2:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint16 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 4:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint32 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ case 8:
++ {
++ value.ordered_offsets_up_to = find_unordered_guint64 (
++ offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ break;
++ }
++ default:
++ /* gvs_get_offset_size() only returns maximum 8 */
++ g_assert_not_reached ();
++ }
++ }
++
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ /* Offsets are invalid somewhere, so return an empty child. */
++ return child;
++ }
++
+ if (index_ > 0)
+ {
+ guint alignment;
+@@ -840,6 +914,9 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+
+ g_assert (offset == offsets.data_size);
+
++ /* All offsets have now been checked. */
++ value.ordered_offsets_up_to = G_MAXSIZE;
++
+ return TRUE;
+ }
+
+@@ -1072,7 +1149,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ for (i = 0; i < length; i++)
+ {
+ const GVariantMemberInfo *member_info;
+- GVariantSerialised child;
++ GVariantSerialised child = { 0, };
+ gsize fixed_size;
+ guint alignment;
+ gsize end;
+@@ -1132,6 +1209,9 @@ gvs_tuple_is_normal (GVariantSerialised value)
+ offset = end;
+ }
+
++ /* All element bounds have been checked above. */
++ value.ordered_offsets_up_to = G_MAXSIZE;
++
+ {
+ gsize fixed_size;
+ guint alignment;
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 81343e9..99d18ef 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -29,6 +29,14 @@ typedef struct
+ guchar *data;
+ gsize size;
+ gsize depth; /* same semantics as GVariant.depth */
++ /* If ordered_offsets_up_to == n this means that all the frame offsets up to and
++ * including the frame offset determining the end of element n are in order.
++ * This guarantees that the bytes of element n don't overlap with any previous
++ * element.
++ *
++ * This is both read and set by g_variant_serialised_get_child for arrays of
++ * non-fixed-width types */
++ gsize ordered_offsets_up_to;
+ } GVariantSerialised;
+
+ /* deserialisation */
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 0e5ec8e..967e9a1 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1,5 +1,6 @@
+ /*
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2020 William Manley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -1283,6 +1284,7 @@ random_instance_filler (GVariantSerialised *serialised,
+ serialised->size = instance->size;
+
+ serialised->depth = 0;
++ serialised->ordered_offsets_up_to = 0;
+
+ g_assert_true (serialised->type_info == instance->type_info);
+ g_assert_cmpuint (serialised->size, ==, instance->size);
+@@ -5039,6 +5041,47 @@ test_normal_checking_array_offsets (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that we can't have non-normal values that take up
++ * significantly more space than the normal equivalent, by specifying the
++ * offset table entries so that array elements overlap.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_832242 */
++static void
++test_normal_checking_array_offsets2 (void)
++{
++ const guint8 data[] = {
++ 'h', 'i', '\0',
++ 0x03, 0x00, 0x03,
++ 0x06, 0x00, 0x06,
++ 0x09, 0x00, 0x09,
++ 0x0c, 0x00, 0x0c,
++ 0x0f, 0x00, 0x0f,
++ 0x12, 0x00, 0x12,
++ 0x15, 0x00, 0x15,
++ };
++ gsize size = sizeof (data);
++ const GVariantType *aaaaaaas = G_VARIANT_TYPE ("aaaaaaas");
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (aaaaaaas, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 2);
++
++ expected = g_variant_new_parsed (
++ "[[[[[[['hi', '', ''], [], []], [], []], [], []], [], []], [], []], [], []]");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that a tuple with invalidly large values in its offset table is
+ * normalised successfully without looping infinitely. */
+ static void
+@@ -5206,6 +5249,8 @@ main (int argc, char **argv)
+ test_normal_checking_tuples);
+ g_test_add_func ("/gvariant/normal-checking/array-offsets",
+ test_normal_checking_array_offsets);
++ g_test_add_func ("/gvariant/normal-checking/array-offsets2",
++ test_normal_checking_array_offsets2);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+--
+2.24.4
+
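The find_unordered_* helpers above implement one idea: offsets only have to be validated up to the largest index actually requested, and the result of that scan is cached so later lookups start where the previous one stopped. A compact sketch of the same idea, assuming a single fixed entry width (find_ordered_prefix is an illustrative stand-in, not the GLib macro):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return the largest index, at or after `start`, up to which the offsets are
 * non-decreasing. Elements past that index cannot be trusted not to overlap. */
static size_t
find_ordered_prefix (const uint32_t *offsets, size_t start, size_t len)
{
  size_t i;

  for (i = start + 1; i < len; i++)
    if (offsets[i] < offsets[i - 1])
      break;

  return i - 1;
}

int
main (void)
{
  /* The third entry goes backwards, so only indices 0..1 are usable. */
  const uint32_t offsets[] = { 3, 6, 2, 9 };
  size_t ordered_up_to = 0;   /* cached between lookups, like ordered_offsets_up_to */

  ordered_up_to = find_ordered_prefix (offsets, ordered_up_to, 4);
  printf ("offsets ordered up to index %zu\n", ordered_up_to);   /* prints 1 */

  /* A later access to a higher index rescans only from the cached position,
   * which is what keeps repeated child lookups O(1) amortised. */
  return 0;
}
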
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch
new file mode 100644
index 0000000000..c057729aae
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0004.patch
@@ -0,0 +1,113 @@
+From 345cae9c1aa7bf6752039225ef4c8d8d69fa8d76 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Fri, 11 Aug 2023 04:09:12 +0000
+Subject: [PATCH] gvariant-serialiser: Factor out code to get bounds of a tuple
+ member
+
+This introduces no functional changes.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/345cae9c1aa7bf6752039225ef4c8d8d69fa8d76]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 73 ++++++++++++++++++++++++--------------
+ 1 file changed, 46 insertions(+), 27 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index fe0b1a4..6f9b366 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -942,6 +942,51 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ * for the tuple. See the notes in gvarianttypeinfo.h.
+ */
+
++static void
++gvs_tuple_get_member_bounds (GVariantSerialised value,
++ gsize index_,
++ gsize offset_size,
++ gsize *out_member_start,
++ gsize *out_member_end)
++{
++ const GVariantMemberInfo *member_info;
++ gsize member_start, member_end;
++
++ member_info = g_variant_type_info_member_info (value.type_info, index_);
++
++ if (member_info->i + 1)
++ member_start = gvs_read_unaligned_le (value.data + value.size -
++ offset_size * (member_info->i + 1),
++ offset_size);
++ else
++ member_start = 0;
++
++ member_start += member_info->a;
++ member_start &= member_info->b;
++ member_start |= member_info->c;
++
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
++ member_end = value.size - offset_size * (member_info->i + 1);
++
++ else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
++ {
++ gsize fixed_size;
++
++ g_variant_type_info_query (member_info->type_info, NULL, &fixed_size);
++ member_end = member_start + fixed_size;
++ }
++
++ else /* G_VARIANT_MEMBER_ENDING_OFFSET */
++ member_end = gvs_read_unaligned_le (value.data + value.size -
++ offset_size * (member_info->i + 2),
++ offset_size);
++
++ if (out_member_start != NULL)
++ *out_member_start = member_start;
++ if (out_member_end != NULL)
++ *out_member_end = member_end;
++}
++
+ static gsize
+ gvs_tuple_n_children (GVariantSerialised value)
+ {
+@@ -997,33 +1042,7 @@ gvs_tuple_get_child (GVariantSerialised value,
+ }
+ }
+
+- if (member_info->i + 1)
+- start = gvs_read_unaligned_le (value.data + value.size -
+- offset_size * (member_info->i + 1),
+- offset_size);
+- else
+- start = 0;
+-
+- start += member_info->a;
+- start &= member_info->b;
+- start |= member_info->c;
+-
+- if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_LAST)
+- end = value.size - offset_size * (member_info->i + 1);
+-
+- else if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
+- {
+- gsize fixed_size;
+-
+- g_variant_type_info_query (child.type_info, NULL, &fixed_size);
+- end = start + fixed_size;
+- child.size = fixed_size;
+- }
+-
+- else /* G_VARIANT_MEMBER_ENDING_OFFSET */
+- end = gvs_read_unaligned_le (value.data + value.size -
+- offset_size * (member_info->i + 2),
+- offset_size);
++ gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
+
+ /* The child should not extend into the offset table. */
+ if (index_ != g_variant_type_info_n_members (value.type_info) - 1)
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch
new file mode 100644
index 0000000000..7e516b07ab
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0005.patch
@@ -0,0 +1,80 @@
+From 73d0aa81c2575a5c9ae77dcb94da919579014fc0 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Fri, 11 Aug 2023 04:13:02 +0000
+Subject: [PATCH] gvariant-serialiser: Rework child size calculation
+
+This reduces a few duplicate calls to `g_variant_type_info_query()` and
+explains why they’re needed.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/73d0aa81c2575a5c9ae77dcb94da919579014fc0]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-serialiser.c | 31 +++++++++----------------------
+ 1 file changed, 9 insertions(+), 22 deletions(-)
+
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index 6f9b366..fb75923 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -1007,14 +1007,18 @@ gvs_tuple_get_child (GVariantSerialised value,
+ child.depth = value.depth + 1;
+ offset_size = gvs_get_offset_size (value.size);
+
++ /* Ensure the size is set for fixed-sized children, or
++ * g_variant_serialised_check() will fail, even if we return
++ * (child.data == NULL) to indicate an error. */
++ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_FIXED)
++ g_variant_type_info_query (child.type_info, NULL, &child.size);
++
+ /* tuples are the only (potentially) fixed-sized containers, so the
+ * only ones that have to deal with the possibility of having %NULL
+ * data with a non-zero %size if errors occurred elsewhere.
+ */
+ if G_UNLIKELY (value.data == NULL && value.size != 0)
+ {
+- g_variant_type_info_query (child.type_info, NULL, &child.size);
+-
+ /* this can only happen in fixed-sized tuples,
+ * so the child must also be fixed sized.
+ */
+@@ -1032,29 +1036,12 @@ gvs_tuple_get_child (GVariantSerialised value,
+ else
+ {
+ if (offset_size * (member_info->i + 1) > value.size)
+- {
+- /* if the child is fixed size, return its size.
+- * if child is not fixed-sized, return size = 0.
+- */
+- g_variant_type_info_query (child.type_info, NULL, &child.size);
+-
+- return child;
+- }
++ return child;
+ }
+
+- gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
+-
+ /* The child should not extend into the offset table. */
+- if (index_ != g_variant_type_info_n_members (value.type_info) - 1)
+- {
+- GVariantSerialised last_child;
+- last_child = gvs_tuple_get_child (value,
+- g_variant_type_info_n_members (value.type_info) - 1);
+- last_end = last_child.data + last_child.size - value.data;
+- g_variant_type_info_unref (last_child.type_info);
+- }
+- else
+- last_end = end;
++ gvs_tuple_get_member_bounds (value, index_, offset_size, &start, &end);
++ gvs_tuple_get_member_bounds (value, g_variant_type_info_n_members (value.type_info) - 1, offset_size, NULL, &last_end);
+
+ if (start < end && end <= value.size && end <= last_end)
+ {
+--
+2.24.4
+
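Both of the last two patches funnel tuple reads through gvs_tuple_get_member_bounds() and then reject any child whose bounds are not sane before handing out data. The defensive shape of that final check, as a standalone sketch (slice_child and the sample bytes are illustrative, not the backported code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hand out the child bytes [start, end) only when the bounds are sane;
 * otherwise return an empty child, mirroring how invalid framing offsets
 * are mapped to default values rather than out-of-bounds reads. */
static size_t
slice_child (const uint8_t *data, size_t size,
             size_t start, size_t end,
             const uint8_t **child_data)
{
  if (start > end || end > size)
    {
      *child_data = NULL;          /* invalid bounds: default/empty child */
      return 0;
    }

  *child_data = data + start;
  return end - start;
}

int
main (void)
{
  const uint8_t container[] = { 1, 2, 3, 4, 5 };
  const uint8_t *child;
  size_t n;

  n = slice_child (container, sizeof container, 1, 4, &child);
  printf ("valid slice: %zu bytes\n", n);          /* 3 bytes */

  n = slice_child (container, sizeof container, 4, 2, &child);
  printf ("end before start: %zu bytes\n", n);     /* 0 bytes */
  return 0;
}
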
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch
new file mode 100644
index 0000000000..8558a7911f
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0006.patch
@@ -0,0 +1,396 @@
+From 7cf6f5b69146d20948d42f0c476688fe17fef787 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 16 Aug 2023 12:09:06 +0000
+Subject: [PATCH] gvariant: Don't allow child elements of a tuple to overlap
+ each other
+
+This is similar to the earlier commit which prevents child elements of a
+variable-sized array from overlapping each other, but this time for
+tuples. It is based heavily on ideas by William Manley.
+
+Tuples are slightly different from variable-sized arrays in that they
+contain a mixture of fixed and variable sized elements. All but one of
+the variable sized elements have an entry in the frame offsets table.
+This means that if we were to just check the ordering of the frame
+offsets table, the variable sized elements could still overlap
+interleaving fixed sized elements, which would be bad.
+
+Therefore we have to check the elements rather than the frame offsets.
+
+The logic of checking the elements up to the index currently being
+requested, and caching the result in `ordered_offsets_up_to`, means that
+the algorithmic cost implications are the same for this commit as for
+variable-sized arrays: an O(N) cost for these checks is amortised out
+over N accesses to O(1) per access.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Fixes: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/7cf6f5b69146d20948d42f0c476688fe17fef787]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 6 +-
+ glib/gvariant-serialiser.c | 40 ++++++++
+ glib/gvariant-serialiser.h | 7 +-
+ glib/gvariant.c | 1 +
+ glib/tests/gvariant.c | 181 +++++++++++++++++++++++++++++++++++++
+ 5 files changed, 232 insertions(+), 3 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index 9b51e15..b951cd9 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2007, 2008 Ryan Lortie
+ * Copyright © 2010 Codethink Limited
++ * Copyright © 2022 Endless OS Foundation, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -179,7 +180,7 @@ struct _GVariant
+ * offsets themselves.
+ *
+ * This field is only relevant for arrays of non
+- * fixed width types.
++ * fixed width types and for tuples.
+ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+@@ -1117,6 +1118,9 @@ g_variant_get_child_value (GVariant *value,
+ */
+ s_child = g_variant_serialised_get_child (serialised, index_);
+
++ /* Update the cached ordered_offsets_up_to, since @serialised will be thrown away when this function exits */
++ value->contents.serialised.ordered_offsets_up_to = MAX (value->contents.serialised.ordered_offsets_up_to, serialised.ordered_offsets_up_to);
++
+ /* Check whether this would cause nesting too deep. If so, return a fake
+ * child. The only situation we expect this to happen in is with a variant,
+ * as all other deeply-nested types have a static type, and hence should
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index fb75923..cd4a3e6 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -942,6 +942,10 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+ * for the tuple. See the notes in gvarianttypeinfo.h.
+ */
+
++/* Note: This doesn’t guarantee that @out_member_end >= @out_member_start; that
++ * condition may not hold true for invalid serialised variants. The caller is
++ * responsible for checking the returned values and handling invalid ones
++ * appropriately. */
+ static void
+ gvs_tuple_get_member_bounds (GVariantSerialised value,
+ gsize index_,
+@@ -1028,6 +1032,42 @@ gvs_tuple_get_child (GVariantSerialised value,
+ return child;
+ }
+
++ /* If the requested @index_ is beyond the set of indices whose framing offsets
++ * have been checked, check the remaining offsets to see whether they’re
++ * normal (in order, no overlapping tuple elements).
++ *
++ * Unlike the checks in gvs_variable_sized_array_get_child(), we have to check
++ * all the tuple *elements* here, not just all the framing offsets, since
++ * tuples contain a mix of elements which use framing offsets and ones which
++ * don’t. None of them are allowed to overlap. */
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ gsize i, prev_i_end = 0;
++
++ if (value.ordered_offsets_up_to > 0)
++ gvs_tuple_get_member_bounds (value, value.ordered_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
++
++ for (i = value.ordered_offsets_up_to; i <= index_; i++)
++ {
++ gsize i_start, i_end;
++
++ gvs_tuple_get_member_bounds (value, i, offset_size, &i_start, &i_end);
++
++ if (i_start > i_end || i_start < prev_i_end || i_end > value.size)
++ break;
++
++ prev_i_end = i_end;
++ }
++
++ value.ordered_offsets_up_to = i - 1;
++ }
++
++ if (index_ > value.ordered_offsets_up_to)
++ {
++ /* Offsets are invalid somewhere, so return an empty child. */
++ return child;
++ }
++
+ if (member_info->ending_type == G_VARIANT_MEMBER_ENDING_OFFSET)
+ {
+ if (offset_size * (member_info->i + 2) > value.size)
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 99d18ef..144aec8 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -34,8 +34,11 @@ typedef struct
+ * This guarantees that the bytes of element n don't overlap with any previous
+ * element.
+ *
+- * This is both read and set by g_variant_serialised_get_child for arrays of
+- * non-fixed-width types */
++ * This is both read and set by g_variant_serialised_get_child() for arrays of
++ * non-fixed-width types, and for tuples.
++ *
++ * Even when dealing with tuples, @ordered_offsets_up_to is an element index,
++ * rather than an index into the frame offsets. */
+ gsize ordered_offsets_up_to;
+ } GVariantSerialised;
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index d6f68a9..cdb428e 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5945,6 +5945,7 @@ g_variant_byteswap (GVariant *value)
+ serialised.type_info = g_variant_get_type_info (trusted);
+ serialised.size = g_variant_get_size (trusted);
+ serialised.data = g_malloc (serialised.size);
++ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
+ g_variant_store (trusted, serialised.data);
+ g_variant_unref (trusted);
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 967e9a1..a84b02e 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1,6 +1,7 @@
+ /*
+ * Copyright © 2010 Codethink Limited
+ * Copyright © 2020 William Manley
++ * Copyright © 2022 Endless OS Foundation, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+@@ -1451,6 +1452,7 @@ test_maybe (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised,
+ random_instance_filler,
+@@ -1574,6 +1576,7 @@ test_array (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1738,6 +1741,7 @@ test_tuple (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1834,6 +1838,7 @@ test_variant (void)
+ serialised.data = flavoured_malloc (needed_size, flavour);
+ serialised.size = needed_size;
+ serialised.depth = 0;
++ serialised.ordered_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) &instance, 1);
+@@ -5106,6 +5111,176 @@ test_normal_checking_tuple_offsets (void)
+ g_variant_unref (variant);
+ }
+
++/* This is a regression test that we can't have non-normal values that take up
++ * significantly more space than the normal equivalent, by specifying the
++ * offset table entries so that tuple elements overlap.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_838503 and
++ * https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_838513 */
++static void
++test_normal_checking_tuple_offsets2 (void)
++{
++ const GVariantType *data_type = G_VARIANT_TYPE ("(yyaiyyaiyy)");
++ const guint8 data[] = {
++ 0x12, 0x34, 0x56, 0x78, 0x01,
++ /*
++ ^───────────────────┘
++
++ ^^^^^^^^^^ 1st yy
++ ^^^^^^^^^^ 2nd yy
++ ^^^^^^^^^^ 3rd yy
++ ^^^^ Framing offsets
++ */
++
++ /* If this variant was encoded normally, it would be something like this:
++ * 0x12, 0x34, pad, pad, [array bytes], 0x56, 0x78, pad, pad, [array bytes], 0x9A, 0xBC, 0xXX
++ * ^─────────────────────────────────────────────────────┘
++ *
++ * ^^^^^^^^^^ 1st yy
++ * ^^^^^^^^^^ 2nd yy
++ * ^^^^^^^^^^ 3rd yy
++ * ^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed (
++ "@(yyaiyyaiyy) (0x12, 0x34, [], 0x00, 0x00, [], 0x00, 0x00)");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
++/* This is a regression test that overlapping entries in the offset table are
++ * decoded consistently, even though they’re non-normal.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_910935 */
++static void
++test_normal_checking_tuple_offsets3 (void)
++{
++ /* The expected decoding of this non-normal byte stream is complex. See
++ * section 2.7.3 (Handling Non-Normal Serialised Data) of the GVariant
++ * specification.
++ *
++ * The rule “Child Values Overlapping Framing Offsets” from the specification
++ * says that the first `ay` must be decoded as `[0x01]` even though it
++ * overlaps the first byte of the offset table. However, since commit
++ * 7eedcd76f7d5b8c98fa60013e1fe6e960bf19df3, GLib explicitly doesn’t allow
++ * this as it’s exploitable. So the first `ay` must be given a default value.
++ *
++ * The second and third `ay`s must be given default values because of rule
++ * “End Boundary Precedes Start Boundary”.
++ *
++ * The `i` must be given a default value because of rule “Start or End
++ * Boundary of a Child Falls Outside the Container”.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(ayayiay)");
++ const guint8 data[] = {
++ 0x01, 0x00, 0x02,
++ /*
++ ^──┘
++
++ ^^^^^^^^^^ 1st ay, bytes 0-2 (but given a default value anyway, see above)
++ 2nd ay, bytes 2-0
++ i, bytes 0-4
++ 3rd ay, bytes 4-1
++ ^^^^^^^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed ("@(ayayiay) ([], [], 0, [])");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
++/* This is a regression test that overlapping entries in the offset table are
++ * decoded consistently, even though they’re non-normal.
++ *
++ * See https://gitlab.gnome.org/GNOME/glib/-/issues/2121#note_910935 */
++static void
++test_normal_checking_tuple_offsets4 (void)
++{
++ /* The expected decoding of this non-normal byte stream is complex. See
++ * section 2.7.3 (Handling Non-Normal Serialised Data) of the GVariant
++ * specification.
++ *
++ * The rule “Child Values Overlapping Framing Offsets” from the specification
++ * says that the first `ay` must be decoded as `[0x01]` even though it
++ * overlaps the first byte of the offset table. However, since commit
++ * 7eedcd76f7d5b8c98fa60013e1fe6e960bf19df3, GLib explicitly doesn’t allow
++ * this as it’s exploitable. So the first `ay` must be given a default value.
++ *
++ * The second `ay` must be given a default value because of rule “End Boundary
++ * Precedes Start Boundary”.
++ *
++ * The third `ay` must be given a default value because its framing offsets
++ * overlap that of the first `ay`.
++ */
++ const GVariantType *data_type = G_VARIANT_TYPE ("(ayayay)");
++ const guint8 data[] = {
++ 0x01, 0x00, 0x02,
++ /*
++ ^──┘
++
++ ^^^^^^^^^^ 1st ay, bytes 0-2 (but given a default value anyway, see above)
++ 2nd ay, bytes 2-0
++ 3rd ay, bytes 0-1
++ ^^^^^^^^^^ Framing offsets
++ */
++ };
++ gsize size = sizeof (data);
++ GVariant *variant = NULL;
++ GVariant *normal_variant = NULL;
++ GVariant *expected = NULL;
++
++ variant = g_variant_new_from_data (data_type, data, size, FALSE, NULL, NULL);
++ g_assert_nonnull (variant);
++
++ g_assert_false (g_variant_is_normal_form (variant));
++
++ normal_variant = g_variant_get_normal_form (variant);
++ g_assert_nonnull (normal_variant);
++ g_assert_cmpuint (g_variant_get_size (normal_variant), <=, size * 3);
++
++ expected = g_variant_new_parsed ("@(ayayay) ([], [], [])");
++ g_assert_cmpvariant (expected, variant);
++ g_assert_cmpvariant (expected, normal_variant);
++
++ g_variant_unref (expected);
++ g_variant_unref (normal_variant);
++ g_variant_unref (variant);
++}
++
+ /* Test that an empty object path is normalised successfully to the base object
+ * path, ‘/’. */
+ static void
+@@ -5253,6 +5428,12 @@ main (int argc, char **argv)
+ test_normal_checking_array_offsets2);
+ g_test_add_func ("/gvariant/normal-checking/tuple-offsets",
+ test_normal_checking_tuple_offsets);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets2",
++ test_normal_checking_tuple_offsets2);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets3",
++ test_normal_checking_tuple_offsets3);
++ g_test_add_func ("/gvariant/normal-checking/tuple-offsets4",
++ test_normal_checking_tuple_offsets4);
+ g_test_add_func ("/gvariant/normal-checking/empty-object-path",
+ test_normal_checking_empty_object_path);
+
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch
new file mode 100644
index 0000000000..83d0205160
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0007.patch
@@ -0,0 +1,49 @@
+From e6490c84e84ba9f182fbd83b51ff4f9f5a0a1793 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Wed, 16 Aug 2023 03:42:47 +0000
+Subject: [PATCH] gvariant: Port g_variant_deep_copy() to count its iterations
+ directly
+
+This is equivalent to what `GVariantIter` does, but it means that
+`g_variant_deep_copy()` is making its own `g_variant_get_child_value()`
+calls.
+
+This will be useful in an upcoming commit, where those child values will
+be inspected a little more deeply.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/e6490c84e84ba9f182fbd83b51ff4f9f5a0a1793]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index cdb428e..fdd36be 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5799,14 +5799,13 @@ g_variant_deep_copy (GVariant *value)
+ case G_VARIANT_CLASS_VARIANT:
+ {
+ GVariantBuilder builder;
+- GVariantIter iter;
+- GVariant *child;
++ gsize i, n_children;
+
+ g_variant_builder_init (&builder, g_variant_get_type (value));
+- g_variant_iter_init (&iter, value);
+
+- while ((child = g_variant_iter_next_value (&iter)))
++ for (i = 0, n_children = g_variant_n_children (value); i < n_children; i++)
+ {
++ GVariant *child = g_variant_get_child_value (value, i);
+ g_variant_builder_add_value (&builder, g_variant_deep_copy (child));
+ g_variant_unref (child);
+ }
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch
new file mode 100644
index 0000000000..f098548618
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0008.patch
@@ -0,0 +1,394 @@
+From d1a293c4e29880b8d17bb826c9a426a440ca4a91 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 01:30:38 +0000
+Subject: [PATCH] gvariant: Track checked and ordered offsets independently
+
+The past few commits introduced the concept of known-good offsets in the
+offset table (which is used for variable-width arrays and tuples).
+Good offsets are ones which are non-overlapping with all the previous
+offsets in the table.
+
+If a bad offset is encountered when indexing into the array or tuple,
+the cached known-good offset index will not be increased. In this way,
+all child variants at and beyond the first bad offset can be returned as
+default values rather than dereferencing potentially invalid data.
+
+In this case, there was no information about the fact that the indexes
+between the highest known-good index and the requested one had been
+checked already. That could lead to a pathological case where an offset
+table with an invalid first offset is repeatedly checked in full when
+trying to access higher-indexed children.
+
+Avoid that by storing the index of the highest checked offset in the
+table, as well as the index of the highest good/ordered offset.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/d1a293c4e29880b8d17bb826c9a426a440ca4a91]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant-core.c | 28 ++++++++++++++++++++++++
+ glib/gvariant-serialiser.c | 44 +++++++++++++++++++++++++++-----------
+ glib/gvariant-serialiser.h | 9 ++++++++
+ glib/gvariant.c | 1 +
+ glib/tests/gvariant.c | 5 +++++
+ 5 files changed, 75 insertions(+), 12 deletions(-)
+
+diff --git a/glib/gvariant-core.c b/glib/gvariant-core.c
+index b951cd9..1b9d5cc 100644
+--- a/glib/gvariant-core.c
++++ b/glib/gvariant-core.c
+@@ -67,6 +67,7 @@ struct _GVariant
+ GBytes *bytes;
+ gconstpointer data;
+ gsize ordered_offsets_up_to;
++ gsize checked_offsets_up_to;
+ } serialised;
+
+ struct
+@@ -182,6 +183,24 @@ struct _GVariant
+ * This field is only relevant for arrays of non
+ * fixed width types and for tuples.
+ *
++ * .checked_offsets_up_to: Similarly to .ordered_offsets_up_to, this stores
++ * the index of the highest element, n, whose frame
++ * offsets (and all the preceding frame offsets)
++ * have been checked for validity.
++ *
++ * It is always the case that
++ * .checked_offsets_up_to ≥ .ordered_offsets_up_to.
++ *
++ * If .checked_offsets_up_to == .ordered_offsets_up_to,
++ * then a bad offset has not been found so far.
++ *
++ * If .checked_offsets_up_to > .ordered_offsets_up_to,
++ * then a bad offset has been found at
++ * (.ordered_offsets_up_to + 1).
++ *
++ * This field is only relevant for arrays of non
++ * fixed width types and for tuples.
++ *
+ * .tree: Only valid when the instance is in tree form.
+ *
+ * Note that accesses from other threads could result in
+@@ -386,6 +405,7 @@ g_variant_to_serialised (GVariant *value)
+ value->size,
+ value->depth,
+ value->contents.serialised.ordered_offsets_up_to,
++ value->contents.serialised.checked_offsets_up_to,
+ };
+ return serialised;
+ }
+@@ -418,6 +438,7 @@ g_variant_serialise (GVariant *value,
+ serialised.data = data;
+ serialised.depth = value->depth;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ children = (gpointer *) value->contents.tree.children;
+ n_children = value->contents.tree.n_children;
+@@ -464,10 +485,12 @@ g_variant_fill_gvs (GVariantSerialised *serialised,
+ if (value->state & STATE_SERIALISED)
+ {
+ serialised->ordered_offsets_up_to = value->contents.serialised.ordered_offsets_up_to;
++ serialised->checked_offsets_up_to = value->contents.serialised.checked_offsets_up_to;
+ }
+ else
+ {
+ serialised->ordered_offsets_up_to = 0;
++ serialised->checked_offsets_up_to = 0;
+ }
+
+ if (serialised->data)
+@@ -513,6 +536,7 @@ g_variant_ensure_serialised (GVariant *value)
+ value->contents.serialised.data = g_bytes_get_data (bytes, NULL);
+ value->contents.serialised.bytes = bytes;
+ value->contents.serialised.ordered_offsets_up_to = G_MAXSIZE;
++ value->contents.serialised.checked_offsets_up_to = G_MAXSIZE;
+ value->state |= STATE_SERIALISED;
+ }
+ }
+@@ -594,6 +618,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ serialised.data = (guchar *) g_bytes_get_data (bytes, &serialised.size);
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++ serialised.checked_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ if (!g_variant_serialised_check (serialised))
+ {
+@@ -644,6 +669,7 @@ g_variant_new_from_bytes (const GVariantType *type,
+ }
+
+ value->contents.serialised.ordered_offsets_up_to = trusted ? G_MAXSIZE : 0;
++ value->contents.serialised.checked_offsets_up_to = trusted ? G_MAXSIZE : 0;
+
+ g_clear_pointer (&owned_bytes, g_bytes_unref);
+
+@@ -1120,6 +1146,7 @@ g_variant_get_child_value (GVariant *value,
+
+ /* Update the cached ordered_offsets_up_to, since @serialised will be thrown away when this function exits */
+ value->contents.serialised.ordered_offsets_up_to = MAX (value->contents.serialised.ordered_offsets_up_to, serialised.ordered_offsets_up_to);
++ value->contents.serialised.checked_offsets_up_to = MAX (value->contents.serialised.checked_offsets_up_to, serialised.checked_offsets_up_to);
+
+ /* Check whether this would cause nesting too deep. If so, return a fake
+ * child. The only situation we expect this to happen in is with a variant,
+@@ -1147,6 +1174,7 @@ g_variant_get_child_value (GVariant *value,
+ g_bytes_ref (value->contents.serialised.bytes);
+ child->contents.serialised.data = s_child.data;
+ child->contents.serialised.ordered_offsets_up_to = s_child.ordered_offsets_up_to;
++ child->contents.serialised.checked_offsets_up_to = s_child.checked_offsets_up_to;
+
+ return child;
+ }
+diff --git a/glib/gvariant-serialiser.c b/glib/gvariant-serialiser.c
+index cd4a3e6..0bf7243 100644
+--- a/glib/gvariant-serialiser.c
++++ b/glib/gvariant-serialiser.c
+@@ -120,6 +120,8 @@
+ *
+ * @depth has no restrictions; the depth of a top-level serialised #GVariant is
+ * zero, and it increases for each level of nested child.
++ *
++ * @checked_offsets_up_to is always ≥ @ordered_offsets_up_to
+ */
+
+ /* < private >
+@@ -147,6 +149,9 @@ g_variant_serialised_check (GVariantSerialised serialised)
+ !(serialised.size == 0 || serialised.data != NULL))
+ return FALSE;
+
++ if (serialised.ordered_offsets_up_to > serialised.checked_offsets_up_to)
++ return FALSE;
++
+ /* Depending on the native alignment requirements of the machine, the
+ * compiler will insert either 3 or 7 padding bytes after the char.
+ * This will result in the sizeof() the struct being 12 or 16.
+@@ -266,6 +271,7 @@ gvs_fixed_sized_maybe_get_child (GVariantSerialised value,
+ g_variant_type_info_ref (value.type_info);
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -297,7 +303,7 @@ gvs_fixed_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0 };
++ GVariantSerialised child = { NULL, value.data, value.size, value.depth + 1, 0, 0 };
+
+ gvs_filler (&child, children[0]);
+ }
+@@ -320,6 +326,7 @@ gvs_fixed_sized_maybe_is_normal (GVariantSerialised value)
+ value.type_info = g_variant_type_info_element (value.type_info);
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -362,6 +369,7 @@ gvs_variable_sized_maybe_get_child (GVariantSerialised value,
+
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return value;
+ }
+@@ -392,7 +400,7 @@ gvs_variable_sized_maybe_serialise (GVariantSerialised value,
+ {
+ if (n_children)
+ {
+- GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0 };
++ GVariantSerialised child = { NULL, value.data, value.size - 1, value.depth + 1, 0, 0 };
+
+ /* write the data for the child. */
+ gvs_filler (&child, children[0]);
+@@ -413,6 +421,7 @@ gvs_variable_sized_maybe_is_normal (GVariantSerialised value)
+ value.size--;
+ value.depth++;
+ value.ordered_offsets_up_to = 0;
++ value.checked_offsets_up_to = 0;
+
+ return g_variant_serialised_is_normal (value);
+ }
+@@ -739,39 +748,46 @@ gvs_variable_sized_array_get_child (GVariantSerialised value,
+
+ /* If the requested @index_ is beyond the set of indices whose framing offsets
+ * have been checked, check the remaining offsets to see whether they’re
+- * normal (in order, no overlapping array elements). */
+- if (index_ > value.ordered_offsets_up_to)
++ * normal (in order, no overlapping array elements).
++ *
++ * Don’t bother checking if the highest known-good offset is lower than the
++ * highest checked offset, as that means there’s an invalid element at that
++ * index, so there’s no need to check further. */
++ if (index_ > value.checked_offsets_up_to &&
++ value.ordered_offsets_up_to == value.checked_offsets_up_to)
+ {
+ switch (offsets.offset_size)
+ {
+ case 1:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint8 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 2:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint16 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 4:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint32 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ case 8:
+ {
+ value.ordered_offsets_up_to = find_unordered_guint64 (
+- offsets.array, value.ordered_offsets_up_to, index_ + 1);
++ offsets.array, value.checked_offsets_up_to, index_ + 1);
+ break;
+ }
+ default:
+ /* gvs_get_offset_size() only returns maximum 8 */
+ g_assert_not_reached ();
+ }
++
++ value.checked_offsets_up_to = index_;
+ }
+
+ if (index_ > value.ordered_offsets_up_to)
+@@ -916,6 +932,7 @@ gvs_variable_sized_array_is_normal (GVariantSerialised value)
+
+ /* All offsets have now been checked. */
+ value.ordered_offsets_up_to = G_MAXSIZE;
++ value.checked_offsets_up_to = G_MAXSIZE;
+
+ return TRUE;
+ }
+@@ -1040,14 +1057,15 @@ gvs_tuple_get_child (GVariantSerialised value,
+ * all the tuple *elements* here, not just all the framing offsets, since
+ * tuples contain a mix of elements which use framing offsets and ones which
+ * don’t. None of them are allowed to overlap. */
+- if (index_ > value.ordered_offsets_up_to)
++ if (index_ > value.checked_offsets_up_to &&
++ value.ordered_offsets_up_to == value.checked_offsets_up_to)
+ {
+ gsize i, prev_i_end = 0;
+
+- if (value.ordered_offsets_up_to > 0)
+- gvs_tuple_get_member_bounds (value, value.ordered_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
++ if (value.checked_offsets_up_to > 0)
++ gvs_tuple_get_member_bounds (value, value.checked_offsets_up_to - 1, offset_size, NULL, &prev_i_end);
+
+- for (i = value.ordered_offsets_up_to; i <= index_; i++)
++ for (i = value.checked_offsets_up_to; i <= index_; i++)
+ {
+ gsize i_start, i_end;
+
+@@ -1060,6 +1078,7 @@ gvs_tuple_get_child (GVariantSerialised value,
+ }
+
+ value.ordered_offsets_up_to = i - 1;
++ value.checked_offsets_up_to = index_;
+ }
+
+ if (index_ > value.ordered_offsets_up_to)
+@@ -1257,6 +1276,7 @@ gvs_tuple_is_normal (GVariantSerialised value)
+
+ /* All element bounds have been checked above. */
+ value.ordered_offsets_up_to = G_MAXSIZE;
++ value.checked_offsets_up_to = G_MAXSIZE;
+
+ {
+ gsize fixed_size;
+diff --git a/glib/gvariant-serialiser.h b/glib/gvariant-serialiser.h
+index 144aec8..e132451 100644
+--- a/glib/gvariant-serialiser.h
++++ b/glib/gvariant-serialiser.h
+@@ -40,6 +40,15 @@ typedef struct
+ * Even when dealing with tuples, @ordered_offsets_up_to is an element index,
+ * rather than an index into the frame offsets. */
+ gsize ordered_offsets_up_to;
++
++ /* Similar to @ordered_offsets_up_to. This gives the index of the child element
++ * whose frame offset is the highest in the offset table which has been
++ * checked so far.
++ *
++ * This is always ≥ @ordered_offsets_up_to. It is always an element index.
++ *
++ * See documentation in gvariant-core.c for `struct GVariant` for details. */
++ gsize checked_offsets_up_to;
+ } GVariantSerialised;
+
+ /* deserialisation */
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index fdd36be..f910bd4 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5945,6 +5945,7 @@ g_variant_byteswap (GVariant *value)
+ serialised.size = g_variant_get_size (trusted);
+ serialised.data = g_malloc (serialised.size);
+ serialised.ordered_offsets_up_to = G_MAXSIZE; /* operating on the normal form */
++ serialised.checked_offsets_up_to = G_MAXSIZE;
+ g_variant_store (trusted, serialised.data);
+ g_variant_unref (trusted);
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index a84b02e..640f3c0 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1286,6 +1286,7 @@ random_instance_filler (GVariantSerialised *serialised,
+
+ serialised->depth = 0;
+ serialised->ordered_offsets_up_to = 0;
++ serialised->checked_offsets_up_to = 0;
+
+ g_assert_true (serialised->type_info == instance->type_info);
+ g_assert_cmpuint (serialised->size, ==, instance->size);
+@@ -1453,6 +1454,7 @@ test_maybe (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised,
+ random_instance_filler,
+@@ -1577,6 +1579,7 @@ test_array (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1742,6 +1745,7 @@ test_tuple (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) instances, n_children);
+@@ -1839,6 +1843,7 @@ test_variant (void)
+ serialised.size = needed_size;
+ serialised.depth = 0;
+ serialised.ordered_offsets_up_to = 0;
++ serialised.checked_offsets_up_to = 0;
+
+ g_variant_serialiser_serialise (serialised, random_instance_filler,
+ (gpointer *) &instance, 1);
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch
new file mode 100644
index 0000000000..a523e60b91
--- /dev/null
+++ b/meta/recipes-core/glib-2.0/glib-2.0/CVE-2023-32665-0009.patch
@@ -0,0 +1,97 @@
+From 298a537d5f6783e55d87e40011ee3fd3b22b72f9 Mon Sep 17 00:00:00 2001
+From: Philip Withnall <pwithnall@endlessos.org>
+Date: Thu, 17 Aug 2023 01:39:01 +0000
+Subject: [PATCH] gvariant: Zero-initialise various GVariantSerialised objects
+
+The following few commits will add a couple of new fields to
+`GVariantSerialised`, and they should be zero-filled by default.
+
+Try and pre-empt that a bit by zero-filling `GVariantSerialised` by
+default in a few places.
+
+Signed-off-by: Philip Withnall <pwithnall@endlessos.org>
+
+Helps: #2121
+
+CVE: CVE-2023-32665
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/glib/-/commit/298a537d5f6783e55d87e40011ee3fd3b22b72f9]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ glib/gvariant.c | 2 +-
+ glib/tests/gvariant.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/glib/gvariant.c b/glib/gvariant.c
+index f910bd4..8ba701e 100644
+--- a/glib/gvariant.c
++++ b/glib/gvariant.c
+@@ -5936,7 +5936,7 @@ g_variant_byteswap (GVariant *value)
+ if (alignment)
+ /* (potentially) contains multi-byte numeric data */
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariant *trusted;
+ GBytes *bytes;
+
+diff --git a/glib/tests/gvariant.c b/glib/tests/gvariant.c
+index 640f3c0..d640c81 100644
+--- a/glib/tests/gvariant.c
++++ b/glib/tests/gvariant.c
+@@ -1446,7 +1446,7 @@ test_maybe (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariantSerialised child;
+
+ serialised.type_info = type_info;
+@@ -1572,7 +1572,7 @@ test_array (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+
+ serialised.type_info = array_info;
+ serialised.data = flavoured_malloc (needed_size, flavour);
+@@ -1738,7 +1738,7 @@ test_tuple (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+
+ serialised.type_info = type_info;
+ serialised.data = flavoured_malloc (needed_size, flavour);
+@@ -1835,7 +1835,7 @@ test_variant (void)
+
+ for (flavour = 0; flavour < 8; flavour += alignment)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ GVariantSerialised child;
+
+ serialised.type_info = type_info;
+@@ -2284,7 +2284,7 @@ serialise_tree (TreeInstance *tree,
+ static void
+ test_byteswap (void)
+ {
+- GVariantSerialised one, two;
++ GVariantSerialised one = { 0, }, two = { 0, };
+ TreeInstance *tree;
+
+ tree = tree_instance_new (NULL, 3);
+@@ -2358,7 +2358,7 @@ test_serialiser_children (void)
+ static void
+ test_fuzz (gdouble *fuzziness)
+ {
+- GVariantSerialised serialised;
++ GVariantSerialised serialised = { 0, };
+ TreeInstance *tree;
+
+ /* make an instance */
+--
+2.24.4
+
diff --git a/meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb b/meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb
index c2145bc6c2..60a6b843c1 100644
--- a/meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb
+++ b/meta/recipes-core/glib-2.0/glib-2.0_2.62.6.bb
@@ -42,6 +42,20 @@ SRC_URI = "${GNOME_MIRROR}/glib/${SHRT_VER}/glib-${PV}.tar.xz \
file://CVE-2021-28153-3.patch \
file://CVE-2021-28153-4.patch \
file://CVE-2021-28153-5.patch \
+ file://CVE-2023-32665-0001.patch \
+ file://CVE-2023-32665-0002.patch \
+ file://CVE-2023-32665-0003.patch \
+ file://CVE-2023-32665-0004.patch \
+ file://CVE-2023-32665-0005.patch \
+ file://CVE-2023-32665-0006.patch \
+ file://CVE-2023-32665-0007.patch \
+ file://CVE-2023-32665-0008.patch \
+ file://CVE-2023-32665-0009.patch \
+ file://CVE-2023-29499.patch \
+ file://CVE-2023-32611-0001.patch \
+ file://CVE-2023-32611-0002.patch \
+ file://CVE-2023-32643.patch \
+ file://CVE-2023-32636.patch \
"
SRC_URI_append_class-native = " file://relocate-modules.patch"
diff --git a/meta/recipes-core/glibc/glibc-version.inc b/meta/recipes-core/glibc/glibc-version.inc
index 68efd09ece..95e2bba301 100644
--- a/meta/recipes-core/glibc/glibc-version.inc
+++ b/meta/recipes-core/glibc/glibc-version.inc
@@ -1,6 +1,6 @@
SRCBRANCH ?= "release/2.31/master"
PV = "2.31+git${SRCPV}"
-SRCREV_glibc ?= "3ef8be9b89ef98300951741f381eb79126ac029f"
+SRCREV_glibc ?= "2d4f26e5cfda682f9ce61444b81533b83f6381af"
SRCREV_localedef ?= "cd9f958c4c94a638fa7b2b4e21627364f1a1a655"
GLIBC_GIT_URI ?= "git://sourceware.org/git/glibc.git"
diff --git a/meta/recipes-core/glibc/glibc.inc b/meta/recipes-core/glibc/glibc.inc
index 23a6ca99ae..e42040f3dc 100644
--- a/meta/recipes-core/glibc/glibc.inc
+++ b/meta/recipes-core/glibc/glibc.inc
@@ -1,7 +1,9 @@
require glibc-common.inc
require glibc-ld.inc
-DEPENDS = "virtual/${TARGET_PREFIX}gcc libgcc-initial linux-libc-headers"
+DEPENDS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}binutils${BUSUFFIX} libgcc-initial linux-libc-headers"
+BUSUFFIX= ""
+BUSUFFIX:class-nativesdk = "-crosssdk"
PROVIDES = "virtual/libc"
PROVIDES += "virtual/libintl virtual/libiconv"
diff --git a/meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch b/meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch
index cef0ce54ed..7561e87121 100644
--- a/meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch
+++ b/meta/recipes-core/glibc/glibc/CVE-2021-33574_1.patch
@@ -11,14 +11,10 @@ CVE: CVE-2021-33574 patch#1
Signed-off-by: Armin Kuster <akuster@mvista.com>
---
- NEWS | 4 ++++
- sysdeps/unix/sysv/linux/mq_notify.c | 15 ++++++++++-----
- 2 files changed, 14 insertions(+), 5 deletions(-)
-
-Index: git/NEWS
-===================================================================
---- git.orig/NEWS
-+++ git/NEWS
+diff --git a/NEWS b/NEWS
+index 8a20d3c4e3..be489243ac 100644
+--- a/NEWS
++++ b/NEWS
@@ -7,6 +7,10 @@ using `glibc' in the "product" field.
Version 2.31.1
@@ -28,12 +24,12 @@ Index: git/NEWS
+ attribute with a non-default affinity mask.
+
The following bugs are resolved with this release:
+ [14231] stdio-common tests memory requirements
[19519] iconv(1) with -c option hangs on illegal multi-byte sequences
- (CVE-2016-10228)
-Index: git/sysdeps/unix/sysv/linux/mq_notify.c
-===================================================================
---- git.orig/sysdeps/unix/sysv/linux/mq_notify.c
-+++ git/sysdeps/unix/sysv/linux/mq_notify.c
+diff --git a/sysdeps/unix/sysv/linux/mq_notify.c b/sysdeps/unix/sysv/linux/mq_notify.c
+index f288bac477..dd47f0b777 100644
+--- a/sysdeps/unix/sysv/linux/mq_notify.c
++++ b/sysdeps/unix/sysv/linux/mq_notify.c
@@ -135,8 +135,11 @@ helper_thread (void *arg)
(void) __pthread_barrier_wait (&notify_barrier);
}
@@ -48,7 +44,7 @@ Index: git/sysdeps/unix/sysv/linux/mq_notify.c
}
return NULL;
}
-@@ -257,8 +260,7 @@ mq_notify (mqd_t mqdes, const struct sig
+@@ -257,8 +260,7 @@ mq_notify (mqd_t mqdes, const struct sigevent *notification)
if (data.attr == NULL)
return -1;
@@ -58,7 +54,7 @@ Index: git/sysdeps/unix/sysv/linux/mq_notify.c
}
/* Construct the new request. */
-@@ -272,7 +274,10 @@ mq_notify (mqd_t mqdes, const struct sig
+@@ -272,7 +274,10 @@ mq_notify (mqd_t mqdes, const struct sigevent *notification)
/* If it failed, free the allocated memory. */
if (__glibc_unlikely (retval != 0))
diff --git a/meta/recipes-core/glibc/glibc/CVE-2023-0687.patch b/meta/recipes-core/glibc/glibc/CVE-2023-0687.patch
new file mode 100644
index 0000000000..10c7e5666d
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2023-0687.patch
@@ -0,0 +1,82 @@
+From 952aff5c00ad7c6b83c3f310f2643939538827f8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=D0=9B=D0=B5=D0=BE=D0=BD=D0=B8=D0=B4=20=D0=AE=D1=80=D1=8C?=
+ =?UTF-8?q?=D0=B5=D0=B2=20=28Leonid=20Yuriev=29?= <leo@yuriev.ru>
+Date: Sat, 4 Feb 2023 14:41:38 +0300
+Subject: [PATCH] gmon: Fix allocated buffer overflow (bug 29444)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The `__monstartup()` allocates a buffer used to store all the data
+accumulated by the monitor.
+
+The size of this buffer depends on the size of the internal structures
+used and the address range for which the monitor is activated, as well
+as on the maximum density of call instructions and/or callable functions
+that could be potentially on a segment of executable code.
+
+In particular a hash table of arcs is placed at the end of this buffer.
+The size of this hash table is calculated in bytes as
+ p->fromssize = p->textsize / HASHFRACTION;
+
+but actually should be
+ p->fromssize = ROUNDUP(p->textsize / HASHFRACTION, sizeof(*p->froms));
+
+This results in writing beyond the end of the allocated buffer when an
+added arc corresponds to a call near the end of the monitored
+address range, since `_mcount()` checks the incoming caller address against
+the monitored range but not the intermediate hash-like index that it
+uses to write into the table.
+
+It should be noted that when the results are output to `gmon.out`, the
+table is read to the last element calculated from the allocated size in
+bytes, so the arcs stored outside the buffer boundary did not fall into
+`gprof` for analysis. Thus this "feature" helped me to find this bug
+while working with https://sourceware.org/bugzilla/show_bug.cgi?id=29438
+
+Just in case, I will explicitly note that the problem breaks the
+`make test t=gmon/tst-gmon-dso` added for Bug 29438.
+There, the arc of the `f3()` call disappears from the output, since in
+the DSO case, the call to `f3` is located close to the end of the
+monitored range.
+
+Signed-off-by: Леонид Юрьев (Leonid Yuriev) <leo@yuriev.ru>
+
+Another minor error seems to be a related typo in the calculation of
+`kcountsize`, but since kcounts are smaller than froms, it actually
+serves to align the p->froms data.
+
+Co-authored-by: DJ Delorie <dj@redhat.com>
+Reviewed-by: Carlos O'Donell <carlos@redhat.com>
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=commit;h=801af9fafd4689337ebf27260aa115335a0cb2bc]
+CVE: CVE-2023-0687
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ gmon/gmon.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/gmon/gmon.c b/gmon/gmon.c
+index dee6480..bf76358 100644
+--- a/gmon/gmon.c
++++ b/gmon/gmon.c
+@@ -132,6 +132,8 @@ __monstartup (u_long lowpc, u_long highpc)
+ p->lowpc = ROUNDDOWN(lowpc, HISTFRACTION * sizeof(HISTCOUNTER));
+ p->highpc = ROUNDUP(highpc, HISTFRACTION * sizeof(HISTCOUNTER));
+ p->textsize = p->highpc - p->lowpc;
++ /* This looks like a typo, but it's here to align the p->froms
++ section. */
+ p->kcountsize = ROUNDUP(p->textsize / HISTFRACTION, sizeof(*p->froms));
+ p->hashfraction = HASHFRACTION;
+ p->log_hashfraction = -1;
+@@ -142,7 +144,7 @@ __monstartup (u_long lowpc, u_long highpc)
+ instead of integer division. Precompute shift amount. */
+ p->log_hashfraction = ffs(p->hashfraction * sizeof(*p->froms)) - 1;
+ }
+- p->fromssize = p->textsize / HASHFRACTION;
++ p->fromssize = ROUNDUP(p->textsize / HASHFRACTION, sizeof(*p->froms));
+ p->tolimit = p->textsize * ARCDENSITY / 100;
+ if (p->tolimit < MINARCS)
+ p->tolimit = MINARCS;
+--
+2.7.4
diff --git a/meta/recipes-core/glibc/glibc/CVE-2023-4813.patch b/meta/recipes-core/glibc/glibc/CVE-2023-4813.patch
new file mode 100644
index 0000000000..c7db4038c2
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2023-4813.patch
@@ -0,0 +1,986 @@
+From 1c37b8022e8763fedbb3f79c02e05c6acfe5a215 Mon Sep 17 00:00:00 2001
+From: Siddhesh Poyarekar <siddhesh@sourceware.org>
+Date: Thu, 17 Mar 2022 11:44:34 +0530
+Subject: [PATCH] Simplify allocations and fix merge and continue actions [BZ
+ #28931]
+
+Allocations for address tuples is currently a bit confusing because of
+the pointer chasing through PAT, making it hard to observe the sequence
+in which allocations have been made. Narrow scope of the pointer
+chasing through PAT so that it is only used where necessary.
+
+This also tightens actions behaviour with the hosts database in
+getaddrinfo to comply with the manual text. The "continue" action
+discards previous results and the "merge" action results in an immediate
+lookup failure. Consequently, chaining of allocations across modules is
+no longer necessary, thus opening up cleanup opportunities.
+
+A test has been added that checks some combinations to ensure that they
+work correctly.
+
+Resolves: BZ #28931
+
+CVE: CVE-2023-4813
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=1c37b8022e8763fedbb3f79c02e05c6acfe5a215]
+Comments: Hunks refreshed
+
+Signed-off-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
+Reviewed-by: DJ Delorie <dj@redhat.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ nss/Makefile | 1 +
+ nss/tst-nss-gai-actions.c | 149 ++++++
+ nss/tst-nss-gai-actions.root/etc/host.conf | 1 +
+ nss/tst-nss-gai-actions.root/etc/hosts | 508 +++++++++++++++++++++
+ sysdeps/posix/getaddrinfo.c | 143 +++---
+ 5 files changed, 750 insertions(+), 52 deletions(-)
+ create mode 100644 nss/tst-nss-gai-actions.c
+ create mode 100644 nss/tst-nss-gai-actions.root/etc/host.conf
+ create mode 100644 nss/tst-nss-gai-actions.root/etc/hosts
+
+diff --git a/nss/Makefile b/nss/Makefile
+index 42a59535cb..d8b06b44fb 100644
+--- a/nss/Makefile
++++ b/nss/Makefile
+@@ -61,6 +61,7 @@
+
+ tests-container = \
+ tst-nss-test3 \
++ tst-nss-gai-actions \
+ tst-nss-files-hosts-long \
+ tst-nss-db-endpwent \
+ tst-nss-db-endgrent
+diff --git a/nss/tst-nss-gai-actions.c b/nss/tst-nss-gai-actions.c
+new file mode 100644
+index 0000000000..efca6cd183
+--- /dev/null
++++ b/nss/tst-nss-gai-actions.c
+@@ -0,0 +1,149 @@
++/* Test continue and merge NSS actions for getaddrinfo.
++ Copyright The GNU Toolchain Authors.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include <dlfcn.h>
++#include <gnu/lib-names.h>
++#include <nss.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++
++#include <support/check.h>
++#include <support/format_nss.h>
++#include <support/support.h>
++#include <support/xstdio.h>
++#include <support/xunistd.h>
++
++enum
++{
++ ACTION_MERGE = 0,
++ ACTION_CONTINUE,
++};
++
++static const char *
++family_str (int family)
++{
++ switch (family)
++ {
++ case AF_UNSPEC:
++ return "AF_UNSPEC";
++ case AF_INET:
++ return "AF_INET";
++ default:
++ __builtin_unreachable ();
++ }
++}
++
++static const char *
++action_str (int action)
++{
++ switch (action)
++ {
++ case ACTION_MERGE:
++ return "merge";
++ case ACTION_CONTINUE:
++ return "continue";
++ default:
++ __builtin_unreachable ();
++ }
++}
++
++static void
++do_one_test (int action, int family, bool canon)
++{
++ struct addrinfo hints =
++ {
++ .ai_family = family,
++ };
++
++ struct addrinfo *ai;
++
++ if (canon)
++ hints.ai_flags = AI_CANONNAME;
++
++ printf ("***** Testing \"files [SUCCESS=%s] files\" for family %s, %s\n",
++ action_str (action), family_str (family),
++ canon ? "AI_CANONNAME" : "");
++
++ int ret = getaddrinfo ("example.org", "80", &hints, &ai);
++
++ switch (action)
++ {
++ case ACTION_MERGE:
++ if (ret == 0)
++ {
++ char *formatted = support_format_addrinfo (ai, ret);
++
++ printf ("merge unexpectedly succeeded:\n %s\n", formatted);
++ support_record_failure ();
++ free (formatted);
++ }
++ else
++ return;
++ case ACTION_CONTINUE:
++ {
++ char *formatted = support_format_addrinfo (ai, ret);
++
++ /* Verify that the result appears exactly once. */
++ const char *expected = "address: STREAM/TCP 192.0.0.1 80\n"
++ "address: DGRAM/UDP 192.0.0.1 80\n"
++ "address: RAW/IP 192.0.0.1 80\n";
++
++ const char *contains = strstr (formatted, expected);
++ const char *contains2 = NULL;
++
++ if (contains != NULL)
++ contains2 = strstr (contains + strlen (expected), expected);
++
++ if (contains == NULL || contains2 != NULL)
++ {
++ printf ("continue failed:\n%s\n", formatted);
++ support_record_failure ();
++ }
++
++ free (formatted);
++ break;
++ }
++ default:
++ __builtin_unreachable ();
++ }
++}
++
++static void
++do_one_test_set (int action)
++{
++ char buf[32];
++
++ snprintf (buf, sizeof (buf), "files [SUCCESS=%s] files",
++ action_str (action));
++ __nss_configure_lookup ("hosts", buf);
++
++ do_one_test (action, AF_UNSPEC, false);
++ do_one_test (action, AF_INET, false);
++ do_one_test (action, AF_INET, true);
++}
++
++static int
++do_test (void)
++{
++ do_one_test_set (ACTION_CONTINUE);
++ do_one_test_set (ACTION_MERGE);
++ return 0;
++}
++
++#include <support/test-driver.c>
+diff --git a/nss/tst-nss-gai-actions.root/etc/host.conf b/nss/tst-nss-gai-actions.root/etc/host.conf
+new file mode 100644
+index 0000000000..d1a59f73a9
+--- /dev/null
++++ b/nss/tst-nss-gai-actions.root/etc/host.conf
+@@ -0,0 +1 @@
++multi on
+diff --git a/nss/tst-nss-gai-actions.root/etc/hosts b/nss/tst-nss-gai-actions.root/etc/hosts
+new file mode 100644
+index 0000000000..50ce9774dc
+--- /dev/null
++++ b/nss/tst-nss-gai-actions.root/etc/hosts
+@@ -0,0 +1,508 @@
++192.0.0.1 example.org
++192.0.0.2 example.org
++192.0.0.3 example.org
++192.0.0.4 example.org
++192.0.0.5 example.org
++192.0.0.6 example.org
++192.0.0.7 example.org
++192.0.0.8 example.org
++192.0.0.9 example.org
++192.0.0.10 example.org
++192.0.0.11 example.org
++192.0.0.12 example.org
++192.0.0.13 example.org
++192.0.0.14 example.org
++192.0.0.15 example.org
++192.0.0.16 example.org
++192.0.0.17 example.org
++192.0.0.18 example.org
++192.0.0.19 example.org
++192.0.0.20 example.org
++192.0.0.21 example.org
++192.0.0.22 example.org
++192.0.0.23 example.org
++192.0.0.24 example.org
++192.0.0.25 example.org
++192.0.0.26 example.org
++192.0.0.27 example.org
++192.0.0.28 example.org
++192.0.0.29 example.org
++192.0.0.30 example.org
++192.0.0.31 example.org
++192.0.0.32 example.org
++192.0.0.33 example.org
++192.0.0.34 example.org
++192.0.0.35 example.org
++192.0.0.36 example.org
++192.0.0.37 example.org
++192.0.0.38 example.org
++192.0.0.39 example.org
++192.0.0.40 example.org
++192.0.0.41 example.org
++192.0.0.42 example.org
++192.0.0.43 example.org
++192.0.0.44 example.org
++192.0.0.45 example.org
++192.0.0.46 example.org
++192.0.0.47 example.org
++192.0.0.48 example.org
++192.0.0.49 example.org
++192.0.0.50 example.org
++192.0.0.51 example.org
++192.0.0.52 example.org
++192.0.0.53 example.org
++192.0.0.54 example.org
++192.0.0.55 example.org
++192.0.0.56 example.org
++192.0.0.57 example.org
++192.0.0.58 example.org
++192.0.0.59 example.org
++192.0.0.60 example.org
++192.0.0.61 example.org
++192.0.0.62 example.org
++192.0.0.63 example.org
++192.0.0.64 example.org
++192.0.0.65 example.org
++192.0.0.66 example.org
++192.0.0.67 example.org
++192.0.0.68 example.org
++192.0.0.69 example.org
++192.0.0.70 example.org
++192.0.0.71 example.org
++192.0.0.72 example.org
++192.0.0.73 example.org
++192.0.0.74 example.org
++192.0.0.75 example.org
++192.0.0.76 example.org
++192.0.0.77 example.org
++192.0.0.78 example.org
++192.0.0.79 example.org
++192.0.0.80 example.org
++192.0.0.81 example.org
++192.0.0.82 example.org
++192.0.0.83 example.org
++192.0.0.84 example.org
++192.0.0.85 example.org
++192.0.0.86 example.org
++192.0.0.87 example.org
++192.0.0.88 example.org
++192.0.0.89 example.org
++192.0.0.90 example.org
++192.0.0.91 example.org
++192.0.0.92 example.org
++192.0.0.93 example.org
++192.0.0.94 example.org
++192.0.0.95 example.org
++192.0.0.96 example.org
++192.0.0.97 example.org
++192.0.0.98 example.org
++192.0.0.99 example.org
++192.0.0.100 example.org
++192.0.0.101 example.org
++192.0.0.102 example.org
++192.0.0.103 example.org
++192.0.0.104 example.org
++192.0.0.105 example.org
++192.0.0.106 example.org
++192.0.0.107 example.org
++192.0.0.108 example.org
++192.0.0.109 example.org
++192.0.0.110 example.org
++192.0.0.111 example.org
++192.0.0.112 example.org
++192.0.0.113 example.org
++192.0.0.114 example.org
++192.0.0.115 example.org
++192.0.0.116 example.org
++192.0.0.117 example.org
++192.0.0.118 example.org
++192.0.0.119 example.org
++192.0.0.120 example.org
++192.0.0.121 example.org
++192.0.0.122 example.org
++192.0.0.123 example.org
++192.0.0.124 example.org
++192.0.0.125 example.org
++192.0.0.126 example.org
++192.0.0.127 example.org
++192.0.0.128 example.org
++192.0.0.129 example.org
++192.0.0.130 example.org
++192.0.0.131 example.org
++192.0.0.132 example.org
++192.0.0.133 example.org
++192.0.0.134 example.org
++192.0.0.135 example.org
++192.0.0.136 example.org
++192.0.0.137 example.org
++192.0.0.138 example.org
++192.0.0.139 example.org
++192.0.0.140 example.org
++192.0.0.141 example.org
++192.0.0.142 example.org
++192.0.0.143 example.org
++192.0.0.144 example.org
++192.0.0.145 example.org
++192.0.0.146 example.org
++192.0.0.147 example.org
++192.0.0.148 example.org
++192.0.0.149 example.org
++192.0.0.150 example.org
++192.0.0.151 example.org
++192.0.0.152 example.org
++192.0.0.153 example.org
++192.0.0.154 example.org
++192.0.0.155 example.org
++192.0.0.156 example.org
++192.0.0.157 example.org
++192.0.0.158 example.org
++192.0.0.159 example.org
++192.0.0.160 example.org
++192.0.0.161 example.org
++192.0.0.162 example.org
++192.0.0.163 example.org
++192.0.0.164 example.org
++192.0.0.165 example.org
++192.0.0.166 example.org
++192.0.0.167 example.org
++192.0.0.168 example.org
++192.0.0.169 example.org
++192.0.0.170 example.org
++192.0.0.171 example.org
++192.0.0.172 example.org
++192.0.0.173 example.org
++192.0.0.174 example.org
++192.0.0.175 example.org
++192.0.0.176 example.org
++192.0.0.177 example.org
++192.0.0.178 example.org
++192.0.0.179 example.org
++192.0.0.180 example.org
++192.0.0.181 example.org
++192.0.0.182 example.org
++192.0.0.183 example.org
++192.0.0.184 example.org
++192.0.0.185 example.org
++192.0.0.186 example.org
++192.0.0.187 example.org
++192.0.0.188 example.org
++192.0.0.189 example.org
++192.0.0.190 example.org
++192.0.0.191 example.org
++192.0.0.192 example.org
++192.0.0.193 example.org
++192.0.0.194 example.org
++192.0.0.195 example.org
++192.0.0.196 example.org
++192.0.0.197 example.org
++192.0.0.198 example.org
++192.0.0.199 example.org
++192.0.0.200 example.org
++192.0.0.201 example.org
++192.0.0.202 example.org
++192.0.0.203 example.org
++192.0.0.204 example.org
++192.0.0.205 example.org
++192.0.0.206 example.org
++192.0.0.207 example.org
++192.0.0.208 example.org
++192.0.0.209 example.org
++192.0.0.210 example.org
++192.0.0.211 example.org
++192.0.0.212 example.org
++192.0.0.213 example.org
++192.0.0.214 example.org
++192.0.0.215 example.org
++192.0.0.216 example.org
++192.0.0.217 example.org
++192.0.0.218 example.org
++192.0.0.219 example.org
++192.0.0.220 example.org
++192.0.0.221 example.org
++192.0.0.222 example.org
++192.0.0.223 example.org
++192.0.0.224 example.org
++192.0.0.225 example.org
++192.0.0.226 example.org
++192.0.0.227 example.org
++192.0.0.228 example.org
++192.0.0.229 example.org
++192.0.0.230 example.org
++192.0.0.231 example.org
++192.0.0.232 example.org
++192.0.0.233 example.org
++192.0.0.234 example.org
++192.0.0.235 example.org
++192.0.0.236 example.org
++192.0.0.237 example.org
++192.0.0.238 example.org
++192.0.0.239 example.org
++192.0.0.240 example.org
++192.0.0.241 example.org
++192.0.0.242 example.org
++192.0.0.243 example.org
++192.0.0.244 example.org
++192.0.0.245 example.org
++192.0.0.246 example.org
++192.0.0.247 example.org
++192.0.0.248 example.org
++192.0.0.249 example.org
++192.0.0.250 example.org
++192.0.0.251 example.org
++192.0.0.252 example.org
++192.0.0.253 example.org
++192.0.0.254 example.org
++192.0.1.1 example.org
++192.0.1.2 example.org
++192.0.1.3 example.org
++192.0.1.4 example.org
++192.0.1.5 example.org
++192.0.1.6 example.org
++192.0.1.7 example.org
++192.0.1.8 example.org
++192.0.1.9 example.org
++192.0.1.10 example.org
++192.0.1.11 example.org
++192.0.1.12 example.org
++192.0.1.13 example.org
++192.0.1.14 example.org
++192.0.1.15 example.org
++192.0.1.16 example.org
++192.0.1.17 example.org
++192.0.1.18 example.org
++192.0.1.19 example.org
++192.0.1.20 example.org
++192.0.1.21 example.org
++192.0.1.22 example.org
++192.0.1.23 example.org
++192.0.1.24 example.org
++192.0.1.25 example.org
++192.0.1.26 example.org
++192.0.1.27 example.org
++192.0.1.28 example.org
++192.0.1.29 example.org
++192.0.1.30 example.org
++192.0.1.31 example.org
++192.0.1.32 example.org
++192.0.1.33 example.org
++192.0.1.34 example.org
++192.0.1.35 example.org
++192.0.1.36 example.org
++192.0.1.37 example.org
++192.0.1.38 example.org
++192.0.1.39 example.org
++192.0.1.40 example.org
++192.0.1.41 example.org
++192.0.1.42 example.org
++192.0.1.43 example.org
++192.0.1.44 example.org
++192.0.1.45 example.org
++192.0.1.46 example.org
++192.0.1.47 example.org
++192.0.1.48 example.org
++192.0.1.49 example.org
++192.0.1.50 example.org
++192.0.1.51 example.org
++192.0.1.52 example.org
++192.0.1.53 example.org
++192.0.1.54 example.org
++192.0.1.55 example.org
++192.0.1.56 example.org
++192.0.1.57 example.org
++192.0.1.58 example.org
++192.0.1.59 example.org
++192.0.1.60 example.org
++192.0.1.61 example.org
++192.0.1.62 example.org
++192.0.1.63 example.org
++192.0.1.64 example.org
++192.0.1.65 example.org
++192.0.1.66 example.org
++192.0.1.67 example.org
++192.0.1.68 example.org
++192.0.1.69 example.org
++192.0.1.70 example.org
++192.0.1.71 example.org
++192.0.1.72 example.org
++192.0.1.73 example.org
++192.0.1.74 example.org
++192.0.1.75 example.org
++192.0.1.76 example.org
++192.0.1.77 example.org
++192.0.1.78 example.org
++192.0.1.79 example.org
++192.0.1.80 example.org
++192.0.1.81 example.org
++192.0.1.82 example.org
++192.0.1.83 example.org
++192.0.1.84 example.org
++192.0.1.85 example.org
++192.0.1.86 example.org
++192.0.1.87 example.org
++192.0.1.88 example.org
++192.0.1.89 example.org
++192.0.1.90 example.org
++192.0.1.91 example.org
++192.0.1.92 example.org
++192.0.1.93 example.org
++192.0.1.94 example.org
++192.0.1.95 example.org
++192.0.1.96 example.org
++192.0.1.97 example.org
++192.0.1.98 example.org
++192.0.1.99 example.org
++192.0.1.100 example.org
++192.0.1.101 example.org
++192.0.1.102 example.org
++192.0.1.103 example.org
++192.0.1.104 example.org
++192.0.1.105 example.org
++192.0.1.106 example.org
++192.0.1.107 example.org
++192.0.1.108 example.org
++192.0.1.109 example.org
++192.0.1.110 example.org
++192.0.1.111 example.org
++192.0.1.112 example.org
++192.0.1.113 example.org
++192.0.1.114 example.org
++192.0.1.115 example.org
++192.0.1.116 example.org
++192.0.1.117 example.org
++192.0.1.118 example.org
++192.0.1.119 example.org
++192.0.1.120 example.org
++192.0.1.121 example.org
++192.0.1.122 example.org
++192.0.1.123 example.org
++192.0.1.124 example.org
++192.0.1.125 example.org
++192.0.1.126 example.org
++192.0.1.127 example.org
++192.0.1.128 example.org
++192.0.1.129 example.org
++192.0.1.130 example.org
++192.0.1.131 example.org
++192.0.1.132 example.org
++192.0.1.133 example.org
++192.0.1.134 example.org
++192.0.1.135 example.org
++192.0.1.136 example.org
++192.0.1.137 example.org
++192.0.1.138 example.org
++192.0.1.139 example.org
++192.0.1.140 example.org
++192.0.1.141 example.org
++192.0.1.142 example.org
++192.0.1.143 example.org
++192.0.1.144 example.org
++192.0.1.145 example.org
++192.0.1.146 example.org
++192.0.1.147 example.org
++192.0.1.148 example.org
++192.0.1.149 example.org
++192.0.1.150 example.org
++192.0.1.151 example.org
++192.0.1.152 example.org
++192.0.1.153 example.org
++192.0.1.154 example.org
++192.0.1.155 example.org
++192.0.1.156 example.org
++192.0.1.157 example.org
++192.0.1.158 example.org
++192.0.1.159 example.org
++192.0.1.160 example.org
++192.0.1.161 example.org
++192.0.1.162 example.org
++192.0.1.163 example.org
++192.0.1.164 example.org
++192.0.1.165 example.org
++192.0.1.166 example.org
++192.0.1.167 example.org
++192.0.1.168 example.org
++192.0.1.169 example.org
++192.0.1.170 example.org
++192.0.1.171 example.org
++192.0.1.172 example.org
++192.0.1.173 example.org
++192.0.1.174 example.org
++192.0.1.175 example.org
++192.0.1.176 example.org
++192.0.1.177 example.org
++192.0.1.178 example.org
++192.0.1.179 example.org
++192.0.1.180 example.org
++192.0.1.181 example.org
++192.0.1.182 example.org
++192.0.1.183 example.org
++192.0.1.184 example.org
++192.0.1.185 example.org
++192.0.1.186 example.org
++192.0.1.187 example.org
++192.0.1.188 example.org
++192.0.1.189 example.org
++192.0.1.190 example.org
++192.0.1.191 example.org
++192.0.1.192 example.org
++192.0.1.193 example.org
++192.0.1.194 example.org
++192.0.1.195 example.org
++192.0.1.196 example.org
++192.0.1.197 example.org
++192.0.1.198 example.org
++192.0.1.199 example.org
++192.0.1.200 example.org
++192.0.1.201 example.org
++192.0.1.202 example.org
++192.0.1.203 example.org
++192.0.1.204 example.org
++192.0.1.205 example.org
++192.0.1.206 example.org
++192.0.1.207 example.org
++192.0.1.208 example.org
++192.0.1.209 example.org
++192.0.1.210 example.org
++192.0.1.211 example.org
++192.0.1.212 example.org
++192.0.1.213 example.org
++192.0.1.214 example.org
++192.0.1.215 example.org
++192.0.1.216 example.org
++192.0.1.217 example.org
++192.0.1.218 example.org
++192.0.1.219 example.org
++192.0.1.220 example.org
++192.0.1.221 example.org
++192.0.1.222 example.org
++192.0.1.223 example.org
++192.0.1.224 example.org
++192.0.1.225 example.org
++192.0.1.226 example.org
++192.0.1.227 example.org
++192.0.1.228 example.org
++192.0.1.229 example.org
++192.0.1.230 example.org
++192.0.1.231 example.org
++192.0.1.232 example.org
++192.0.1.233 example.org
++192.0.1.234 example.org
++192.0.1.235 example.org
++192.0.1.236 example.org
++192.0.1.237 example.org
++192.0.1.238 example.org
++192.0.1.239 example.org
++192.0.1.240 example.org
++192.0.1.241 example.org
++192.0.1.242 example.org
++192.0.1.243 example.org
++192.0.1.244 example.org
++192.0.1.245 example.org
++192.0.1.246 example.org
++192.0.1.247 example.org
++192.0.1.248 example.org
++192.0.1.249 example.org
++192.0.1.250 example.org
++192.0.1.251 example.org
++192.0.1.252 example.org
++192.0.1.253 example.org
++192.0.1.254 example.org
+diff --git a/sysdeps/posix/getaddrinfo.c b/sysdeps/posix/getaddrinfo.c
+index 18dccd5924..3d9bea60c6 100644
+--- a/sysdeps/posix/getaddrinfo.c
++++ b/sysdeps/posix/getaddrinfo.c
+@@ -458,11 +458,6 @@ gaih_inet (const char *name, const struct gaih_service *service,
+
+ if (name != NULL)
+ {
+- at = alloca_account (sizeof (struct gaih_addrtuple), alloca_used);
+- at->family = AF_UNSPEC;
+- at->scopeid = 0;
+- at->next = NULL;
+-
+ if (req->ai_flags & AI_IDN)
+ {
+ char *out;
+@@ -473,13 +468,21 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ malloc_name = true;
+ }
+
+- if (__inet_aton_exact (name, (struct in_addr *) at->addr) != 0)
++ uint32_t addr[4];
++ if (__inet_aton_exact (name, (struct in_addr *) addr) != 0)
+ {
++ at = alloca_account (sizeof (struct gaih_addrtuple), alloca_used);
++ at->scopeid = 0;
++ at->next = NULL;
++
+ if (req->ai_family == AF_UNSPEC || req->ai_family == AF_INET)
+- at->family = AF_INET;
++ {
++ memcpy (at->addr, addr, sizeof (at->addr));
++ at->family = AF_INET;
++ }
+ else if (req->ai_family == AF_INET6 && (req->ai_flags & AI_V4MAPPED))
+ {
+- at->addr[3] = at->addr[0];
++ at->addr[3] = addr[0];
+ at->addr[2] = htonl (0xffff);
+ at->addr[1] = 0;
+ at->addr[0] = 0;
+@@ -505,49 +505,62 @@
+
+ if (req->ai_flags & AI_CANONNAME)
+ canon = name;
++
++ goto process_list;
+ }
+- else if (at->family == AF_UNSPEC)
++
++ char *scope_delim = strchr (name, SCOPE_DELIMITER);
++ int e;
++
++ if (scope_delim == NULL)
++ e = inet_pton (AF_INET6, name, addr);
++ else
++ e = __inet_pton_length (AF_INET6, name, scope_delim - name, addr);
++
++ if (e > 0)
+ {
+- char *scope_delim = strchr (name, SCOPE_DELIMITER);
+- int e;
+- if (scope_delim == NULL)
+- e = inet_pton (AF_INET6, name, at->addr);
++ at = alloca_account (sizeof (struct gaih_addrtuple),
++ alloca_used);
++ at->scopeid = 0;
++ at->next = NULL;
++
++ if (req->ai_family == AF_UNSPEC || req->ai_family == AF_INET6)
++ {
++ memcpy (at->addr, addr, sizeof (at->addr));
++ at->family = AF_INET6;
++ }
++ else if (req->ai_family == AF_INET
++ && IN6_IS_ADDR_V4MAPPED (addr))
++ {
++ at->addr[0] = addr[3];
++ at->addr[1] = addr[1];
++ at->addr[2] = addr[2];
++ at->addr[3] = addr[3];
++ at->family = AF_INET;
++ }
+ else
+- e = __inet_pton_length (AF_INET6, name, scope_delim - name,
+- at->addr);
+- if (e > 0)
+ {
+- if (req->ai_family == AF_UNSPEC || req->ai_family == AF_INET6)
+- at->family = AF_INET6;
+- else if (req->ai_family == AF_INET
+- && IN6_IS_ADDR_V4MAPPED (at->addr))
+- {
+- at->addr[0] = at->addr[3];
+- at->family = AF_INET;
+- }
+- else
+- {
+- result = -EAI_ADDRFAMILY;
+- goto free_and_return;
+- }
+-
+- if (scope_delim != NULL
+- && __inet6_scopeid_pton ((struct in6_addr *) at->addr,
+- scope_delim + 1,
+- &at->scopeid) != 0)
+- {
+- result = -EAI_NONAME;
+- goto free_and_return;
+- }
++ result = -EAI_ADDRFAMILY;
++ goto free_and_return;
++ }
+
+- if (req->ai_flags & AI_CANONNAME)
+- canon = name;
++ if (scope_delim != NULL
++ && __inet6_scopeid_pton ((struct in6_addr *) at->addr,
++ scope_delim + 1,
++ &at->scopeid) != 0)
++ {
++ result = -EAI_NONAME;
++ goto free_and_return;
+ }
++
++ if (req->ai_flags & AI_CANONNAME)
++ canon = name;
++
++ goto process_list;
+ }
+
+- if (at->family == AF_UNSPEC && (req->ai_flags & AI_NUMERICHOST) == 0)
++ if ((req->ai_flags & AI_NUMERICHOST) == 0)
+ {
+- struct gaih_addrtuple **pat = &at;
+ int no_data = 0;
+ int no_inet6_data = 0;
+ service_user *nip;
+@@ -543,6 +559,7 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ enum nss_status status = NSS_STATUS_UNAVAIL;
+ int no_more;
+ struct resolv_context *res_ctx = NULL;
++ bool do_merge = false;
+
+ /* If we do not have to look for IPv6 addresses or the canonical
+ name, use the simple, old functions, which do not support
+@@ -579,7 +596,7 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ result = -EAI_MEMORY;
+ goto free_and_return;
+ }
+- *pat = addrmem;
++ at = addrmem;
+ }
+ else
+ {
+@@ -632,6 +649,8 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ }
+
+ struct gaih_addrtuple *addrfree = addrmem;
++ struct gaih_addrtuple **pat = &at;
++
+ for (int i = 0; i < air->naddrs; ++i)
+ {
+ socklen_t size = (air->family[i] == AF_INET
+@@ -695,12 +714,6 @@ gaih_inet (const char *name, const struct gaih_service *service,
+
+ free (air);
+
+- if (at->family == AF_UNSPEC)
+- {
+- result = -EAI_NONAME;
+- goto free_and_return;
+- }
+-
+ goto process_list;
+ }
+ else if (err == 0)
+@@ -750,6 +763,22 @@
+
+ while (!no_more)
+ {
++ /* Always start afresh; continue should discard previous results
++ and the hosts database does not support merge. */
++ at = NULL;
++ free (canonbuf);
++ free (addrmem);
++ canon = canonbuf = NULL;
++ addrmem = NULL;
++ got_ipv6 = false;
++
++ if (do_merge)
++ {
++ __set_h_errno (NETDB_INTERNAL);
++ __set_errno (EBUSY);
++ break;
++ }
++
+ no_data = 0;
+ nss_gethostbyname4_r fct4 = NULL;
+
+@@ -744,12 +773,14 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ {
+ while (1)
+ {
+- status = DL_CALL_FCT (fct4, (name, pat,
++ status = DL_CALL_FCT (fct4, (name, &at,
+ tmpbuf->data, tmpbuf->length,
+ &errno, &h_errno,
+ NULL));
+ if (status == NSS_STATUS_SUCCESS)
+ break;
++ /* gethostbyname4_r may write into AT, so reset it. */
++ at = NULL;
+ if (status != NSS_STATUS_TRYAGAIN
+ || errno != ERANGE || h_errno != NETDB_INTERNAL)
+ {
+@@ -774,7 +805,9 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ no_data = 1;
+
+ if ((req->ai_flags & AI_CANONNAME) != 0 && canon == NULL)
+- canon = (*pat)->name;
++ canon = at->name;
++
++ struct gaih_addrtuple **pat = &at;
+
+ while (*pat != NULL)
+ {
+@@ -826,6 +859,8 @@ gaih_inet (const char *name, const struct gaih_service *service,
+
+ if (fct != NULL)
+ {
++ struct gaih_addrtuple **pat = &at;
++
+ if (req->ai_family == AF_INET6
+ || req->ai_family == AF_UNSPEC)
+ {
+@@ -917,6 +946,10 @@
+ if (nss_next_action (nip, status) == NSS_ACTION_RETURN)
+ break;
+
++ /* The hosts database does not support MERGE. */
++ if (nss_next_action (nip, status) == NSS_ACTION_MERGE)
++ do_merge = true;
++
+ if (nip->next == NULL)
+ no_more = -1;
+ else
+@@ -930,7 +969,7 @@ gaih_inet (const char *name, const struct gaih_service *service,
+ }
+
+ process_list:
+- if (at->family == AF_UNSPEC)
++ if (at == NULL)
+ {
+ result = -EAI_NONAME;
+ goto free_and_return;
+--
+2.39.3
diff --git a/meta/recipes-core/glibc/glibc/CVE-2023-4911.patch b/meta/recipes-core/glibc/glibc/CVE-2023-4911.patch
new file mode 100644
index 0000000000..4d3146509a
--- /dev/null
+++ b/meta/recipes-core/glibc/glibc/CVE-2023-4911.patch
@@ -0,0 +1,63 @@
+From d2b77337f734fcacdfc8e0ddec14cf31a746c7be Mon Sep 17 00:00:00 2001
+From: Siddhesh Poyarekar <siddhesh@redhat.com>
+Date: Mon, 11 Sep 2023 18:53:15 -0400
+Subject: [PATCH v2] tunables: Terminate immediately if end of input is reached
+
+The string parsing routine may end up writing beyond bounds of tunestr
+if the input tunable string is malformed, of the form name=name=val.
+This gets processed twice, first as name=name=val and next as name=val,
+resulting in tunestr being name=name=val:name=val, thus overflowing
+tunestr.
+
+Terminate the parsing loop at the first instance itself so that tunestr
+does not overflow.
+---
+Changes from v1:
+
+- Also null-terminate tunestr before exiting.
+
+ elf/dl-tunables.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+Upstream-Status: Backport [git://sourceware.org/git/glibc.git]
+CVE: CVE-2023-4911
+
+diff --git a/elf/dl-tunables.c b/elf/dl-tunables.c
+index 8e7ee9df10..76cf8b9da3 100644
+--- a/elf/dl-tunables.c
++++ b/elf/dl-tunables.c
+@@ -187,11 +187,7 @@ parse_tunables (char *tunestr, char *valstring)
+ /* If we reach the end of the string before getting a valid name-value
+ pair, bail out. */
+ if (p[len] == '\0')
+- {
+- if (__libc_enable_secure)
+- tunestr[off] = '\0';
+- return;
+- }
++ break;
+
+ /* We did not find a valid name-value pair before encountering the
+ colon. */
+@@ -251,9 +247,16 @@ parse_tunables (char *tunestr, char *valstring)
+ }
+ }
+
+- if (p[len] != '\0')
+- p += len + 1;
++ /* We reached the end while processing the tunable string. */
++ if (p[len] == '\0')
++ break;
++
++ p += len + 1;
+ }
++
++ /* Terminate tunestr before we leave. */
++ if (__libc_enable_secure)
++ tunestr[off] = '\0';
+ }
+ #endif
+
+--
+2.41.0
+
diff --git a/meta/recipes-core/glibc/glibc/check-test-wrapper b/meta/recipes-core/glibc/glibc/check-test-wrapper
index 6ec9b9b29e..5cc993f718 100644
--- a/meta/recipes-core/glibc/glibc/check-test-wrapper
+++ b/meta/recipes-core/glibc/glibc/check-test-wrapper
@@ -58,7 +58,7 @@ elif targettype == "ssh":
user = os.environ.get("SSH_HOST_USER", None)
port = os.environ.get("SSH_HOST_PORT", None)
- command = ["ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no"]
+ command = ["ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=quiet"]
if port:
command += ["-p", str(port)]
if not host:
diff --git a/meta/recipes-core/glibc/glibc_2.31.bb b/meta/recipes-core/glibc/glibc_2.31.bb
index 0c37467fe4..296c892994 100644
--- a/meta/recipes-core/glibc/glibc_2.31.bb
+++ b/meta/recipes-core/glibc/glibc_2.31.bb
@@ -29,6 +29,13 @@ CVE_CHECK_WHITELIST += "CVE-2019-1010025"
# https://git.yoctoproject.org/cgit/cgit.cgi/poky/commit/?h=dunfell&id=e1e89ff7d75c3d2223f9e3bd875b9b0c5e15836b
CVE_CHECK_WHITELIST += "CVE-2021-35942"
+# glibc https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2023-4527
+# This vulnerability was introduced in 2.36 by commit
+# f282cdbe7f436c75864e5640a409a10485e9abb2 resolv: Implement no-aaaa stub resolver option
+# so our version is not yet vulnerable
+# See https://sourceware.org/bugzilla/show_bug.cgi?id=30842
+CVE_CHECK_WHITELIST += "CVE-2023-4527"
+
DEPENDS += "gperf-native bison-native make-native"
NATIVESDKFIXES ?= ""
@@ -79,6 +86,9 @@ SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
file://0035-x86_64-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch \
file://0036-i386-Avoid-lazy-relocation-of-tlsdesc-BZ-27137.patch \
file://0037-Avoid-deadlock-between-pthread_create-and-ctors.patch \
+ file://CVE-2023-0687.patch \
+ file://CVE-2023-4911.patch \
+ file://CVE-2023-4813.patch \
"
S = "${WORKDIR}/git"
B = "${WORKDIR}/build-${TARGET_SYS}"
diff --git a/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
index 085b3f98e7..035312f4d9 100644
--- a/meta/recipes-core/images/build-appliance-image_15.0.0.bb
+++ b/meta/recipes-core/images/build-appliance-image_15.0.0.bb
@@ -24,7 +24,7 @@ IMAGE_FSTYPES = "wic.vmdk"
inherit core-image setuptools3
-SRCREV ?= "8a7fd5f633a2b72185501d4c4a8a51ed1fc7cea1"
+SRCREV ?= "77442211926cbe93d60108f6df4abda3bc06b735"
SRC_URI = "git://git.yoctoproject.org/poky;branch=dunfell \
file://Yocto_Build_Appliance.vmx \
file://Yocto_Build_Appliance.vmxf \
diff --git a/meta/recipes-core/initrdscripts/initramfs-framework/finish b/meta/recipes-core/initrdscripts/initramfs-framework/finish
index 717383ebac..dee3ab3387 100755
--- a/meta/recipes-core/initrdscripts/initramfs-framework/finish
+++ b/meta/recipes-core/initrdscripts/initramfs-framework/finish
@@ -14,6 +14,15 @@ finish_run() {
info "Switching root to '$ROOTFS_DIR'..."
+ debug "Moving basic mounts onto rootfs"
+ for dir in `awk '/\/dev.* \/run\/media/{print $2}' /proc/mounts`; do
+ # Parse any OCT or HEX encoded chars such as spaces
+ # in the mount points to actual ASCII chars
+ dir=`printf $dir`
+ mkdir -p "${ROOTFS_DIR}/media/${dir##*/}"
+ mount -n --move "$dir" "${ROOTFS_DIR}/media/${dir##*/}"
+ done
+
debug "Moving /dev, /proc and /sys onto rootfs..."
mount --move /dev $ROOTFS_DIR/dev
mount --move /proc $ROOTFS_DIR/proc
diff --git a/meta/recipes-core/initscripts/initscripts_1.0.bb b/meta/recipes-core/initscripts/initscripts_1.0.bb
index f98e42eb2e..cb5417cc39 100644
--- a/meta/recipes-core/initscripts/initscripts_1.0.bb
+++ b/meta/recipes-core/initscripts/initscripts_1.0.bb
@@ -129,7 +129,7 @@ do_install () {
update-rc.d -r ${D} rmnologin.sh start 99 2 3 4 5 .
update-rc.d -r ${D} sendsigs start 20 0 6 .
update-rc.d -r ${D} urandom start 38 S 0 6 .
- update-rc.d -r ${D} umountnfs.sh start 31 0 1 6 .
+ update-rc.d -r ${D} umountnfs.sh stop 31 0 1 6 .
update-rc.d -r ${D} umountfs start 40 0 6 .
update-rc.d -r ${D} reboot start 90 6 .
update-rc.d -r ${D} halt start 90 0 .
diff --git a/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch b/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch
new file mode 100644
index 0000000000..b0d26d1c08
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/0001-Port-gentest.py-to-Python-3.patch
@@ -0,0 +1,813 @@
+From b5125000917810731bc28055c0445d571121f80e Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Thu, 21 Apr 2022 00:45:58 +0200
+Subject: [PATCH] Port gentest.py to Python 3
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/343fc1421cdae097fa6c4cffeb1a065a40be6bbb]
+
+* fixes:
+
+make[1]: 'testReader' is up to date.
+ File "../libxml2-2.9.10/gentest.py", line 11
+ print "libxml2 python bindings not available, skipping testapi.c generation"
+ ^
+SyntaxError: Missing parentheses in call to 'print'. Did you mean print("libxml2 python bindings not available, skipping testapi.c generation")?
+make[1]: [Makefile:2078: testapi.c] Error 1 (ignored)
+
+...
+
+make[1]: 'testReader' is up to date.
+ File "../libxml2-2.9.10/gentest.py", line 271
+ return 1
+ ^
+TabError: inconsistent use of tabs and spaces in indentation
+make[1]: [Makefile:2078: testapi.c] Error 1 (ignored)
+
+...
+
+aarch64-oe-linux-gcc: error: testapi.c: No such file or directory
+aarch64-oe-linux-gcc: fatal error: no input files
+compilation terminated.
+make[1]: *** [Makefile:1275: testapi.o] Error 1
+
+But there is still a bit mystery why it worked before, because check-am
+calls gentest.py with $(PYTHON), so it ignores the shebang in the script
+and libxml2 is using python3native (through python3targetconfig.bbclass)
+so something like:
+
+libxml2/2.9.10-r0/recipe-sysroot-native/usr/bin/python3-native/python3 gentest.py
+
+But that still fails (now without SyntaxError) with:
+libxml2 python bindings not available, skipping testapi.c generation
+
+because we don't have dependency on libxml2-native (to provide libxml2
+python bindings form python3native) and exported PYTHON_SITE_PACKAGES
+might be useless (e.g. /usr/lib/python3.8/site-packages on Ubuntu-22.10
+which uses python 3.10 and there is no site-packages with libxml2)
+
+Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
+---
+ gentest.py | 421 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 209 insertions(+), 212 deletions(-)
+
+diff --git a/gentest.py b/gentest.py
+index b763300..0756706 100755
+--- a/gentest.py
++++ b/gentest.py
+@@ -8,7 +8,7 @@ import string
+ try:
+ import libxml2
+ except:
+- print "libxml2 python bindings not available, skipping testapi.c generation"
++ print("libxml2 python bindings not available, skipping testapi.c generation")
+ sys.exit(0)
+
+ if len(sys.argv) > 1:
+@@ -227,7 +227,7 @@ extra_post_call = {
+ if (old != NULL) {
+ xmlUnlinkNode(old);
+ xmlFreeNode(old) ; old = NULL ; }
+- ret_val = NULL;""",
++\t ret_val = NULL;""",
+ "xmlTextMerge":
+ """if ((first != NULL) && (first->type != XML_TEXT_NODE)) {
+ xmlUnlinkNode(second);
+@@ -236,7 +236,7 @@ extra_post_call = {
+ """if ((ret_val != NULL) && (ret_val != ncname) &&
+ (ret_val != prefix) && (ret_val != memory))
+ xmlFree(ret_val);
+- ret_val = NULL;""",
++\t ret_val = NULL;""",
+ "xmlNewDocElementContent":
+ """xmlFreeDocElementContent(doc, ret_val); ret_val = NULL;""",
+ "xmlDictReference": "xmlDictFree(dict);",
+@@ -268,29 +268,29 @@ modules = []
+ def is_skipped_module(name):
+ for mod in skipped_modules:
+ if mod == name:
+- return 1
++ return 1
+ return 0
+
+ def is_skipped_function(name):
+ for fun in skipped_functions:
+ if fun == name:
+- return 1
++ return 1
+ # Do not test destructors
+- if string.find(name, 'Free') != -1:
++ if name.find('Free') != -1:
+ return 1
+ return 0
+
+ def is_skipped_memcheck(name):
+ for fun in skipped_memcheck:
+ if fun == name:
+- return 1
++ return 1
+ return 0
+
+ missing_types = {}
+ def add_missing_type(name, func):
+ try:
+ list = missing_types[name]
+- list.append(func)
++ list.append(func)
+ except:
+ missing_types[name] = [func]
+
+@@ -310,7 +310,7 @@ def add_missing_functions(name, module):
+ missing_functions_nr = missing_functions_nr + 1
+ try:
+ list = missing_functions[module]
+- list.append(name)
++ list.append(name)
+ except:
+ missing_functions[module] = [name]
+
+@@ -319,45 +319,45 @@ def add_missing_functions(name, module):
+ #
+
+ def type_convert(str, name, info, module, function, pos):
+-# res = string.replace(str, " ", " ")
+-# res = string.replace(str, " ", " ")
+-# res = string.replace(str, " ", " ")
+- res = string.replace(str, " *", "_ptr")
+-# res = string.replace(str, "*", "_ptr")
+- res = string.replace(res, " ", "_")
++# res = str.replace(" ", " ")
++# res = str.replace(" ", " ")
++# res = str.replace(" ", " ")
++ res = str.replace(" *", "_ptr")
++# res = str.replace("*", "_ptr")
++ res = res.replace(" ", "_")
+ if res == 'const_char_ptr':
+- if string.find(name, "file") != -1 or \
+- string.find(name, "uri") != -1 or \
+- string.find(name, "URI") != -1 or \
+- string.find(info, "filename") != -1 or \
+- string.find(info, "URI") != -1 or \
+- string.find(info, "URL") != -1:
+- if string.find(function, "Save") != -1 or \
+- string.find(function, "Create") != -1 or \
+- string.find(function, "Write") != -1 or \
+- string.find(function, "Fetch") != -1:
+- return('fileoutput')
+- return('filepath')
++ if name.find("file") != -1 or \
++ name.find("uri") != -1 or \
++ name.find("URI") != -1 or \
++ info.find("filename") != -1 or \
++ info.find("URI") != -1 or \
++ info.find("URL") != -1:
++ if function.find("Save") != -1 or \
++ function.find("Create") != -1 or \
++ function.find("Write") != -1 or \
++ function.find("Fetch") != -1:
++ return('fileoutput')
++ return('filepath')
+ if res == 'void_ptr':
+ if module == 'nanoftp' and name == 'ctx':
+- return('xmlNanoFTPCtxtPtr')
++ return('xmlNanoFTPCtxtPtr')
+ if function == 'xmlNanoFTPNewCtxt' or \
+- function == 'xmlNanoFTPConnectTo' or \
+- function == 'xmlNanoFTPOpen':
+- return('xmlNanoFTPCtxtPtr')
++ function == 'xmlNanoFTPConnectTo' or \
++ function == 'xmlNanoFTPOpen':
++ return('xmlNanoFTPCtxtPtr')
+ if module == 'nanohttp' and name == 'ctx':
+- return('xmlNanoHTTPCtxtPtr')
+- if function == 'xmlNanoHTTPMethod' or \
+- function == 'xmlNanoHTTPMethodRedir' or \
+- function == 'xmlNanoHTTPOpen' or \
+- function == 'xmlNanoHTTPOpenRedir':
+- return('xmlNanoHTTPCtxtPtr');
++ return('xmlNanoHTTPCtxtPtr')
++ if function == 'xmlNanoHTTPMethod' or \
++ function == 'xmlNanoHTTPMethodRedir' or \
++ function == 'xmlNanoHTTPOpen' or \
++ function == 'xmlNanoHTTPOpenRedir':
++ return('xmlNanoHTTPCtxtPtr');
+ if function == 'xmlIOHTTPOpen':
+- return('xmlNanoHTTPCtxtPtr')
+- if string.find(name, "data") != -1:
+- return('userdata')
+- if string.find(name, "user") != -1:
+- return('userdata')
++ return('xmlNanoHTTPCtxtPtr')
++ if name.find("data") != -1:
++ return('userdata')
++ if name.find("user") != -1:
++ return('userdata')
+ if res == 'xmlDoc_ptr':
+ res = 'xmlDocPtr'
+ if res == 'xmlNode_ptr':
+@@ -366,18 +366,18 @@ def type_convert(str, name, info, module, function, pos):
+ res = 'xmlDictPtr'
+ if res == 'xmlNodePtr' and pos != 0:
+ if (function == 'xmlAddChild' and pos == 2) or \
+- (function == 'xmlAddChildList' and pos == 2) or \
++ (function == 'xmlAddChildList' and pos == 2) or \
+ (function == 'xmlAddNextSibling' and pos == 2) or \
+ (function == 'xmlAddSibling' and pos == 2) or \
+ (function == 'xmlDocSetRootElement' and pos == 2) or \
+ (function == 'xmlReplaceNode' and pos == 2) or \
+ (function == 'xmlTextMerge') or \
+- (function == 'xmlAddPrevSibling' and pos == 2):
+- return('xmlNodePtr_in');
++ (function == 'xmlAddPrevSibling' and pos == 2):
++ return('xmlNodePtr_in');
+ if res == 'const xmlBufferPtr':
+ res = 'xmlBufferPtr'
+ if res == 'xmlChar_ptr' and name == 'name' and \
+- string.find(function, "EatName") != -1:
++ function.find("EatName") != -1:
+ return('eaten_name')
+ if res == 'void_ptr*':
+ res = 'void_ptr_ptr'
+@@ -393,7 +393,7 @@ def type_convert(str, name, info, module, function, pos):
+ res = 'debug_FILE_ptr';
+ if res == 'int' and name == 'options':
+ if module == 'parser' or module == 'xmlreader':
+- res = 'parseroptions'
++ res = 'parseroptions'
+
+ return res
+
+@@ -402,28 +402,28 @@ known_param_types = []
+ def is_known_param_type(name):
+ for type in known_param_types:
+ if type == name:
+- return 1
++ return 1
+ return name[-3:] == 'Ptr' or name[-4:] == '_ptr'
+
+ def generate_param_type(name, rtype):
+ global test
+ for type in known_param_types:
+ if type == name:
+- return
++ return
+ for type in generated_param_types:
+ if type == name:
+- return
++ return
+
+ if name[-3:] == 'Ptr' or name[-4:] == '_ptr':
+ if rtype[0:6] == 'const ':
+- crtype = rtype[6:]
+- else:
+- crtype = rtype
++ crtype = rtype[6:]
++ else:
++ crtype = rtype
+
+ define = 0
+- if modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
++ if module in modules_defines:
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
+ test.write("""
+ #define gen_nb_%s 1
+ static %s gen_%s(int no ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
+@@ -433,7 +433,7 @@ static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTR
+ }
+ """ % (name, crtype, name, name, rtype))
+ if define == 1:
+- test.write("#endif\n\n")
++ test.write("#endif\n\n")
+ add_generated_param_type(name)
+
+ #
+@@ -445,7 +445,7 @@ known_return_types = []
+ def is_known_return_type(name):
+ for type in known_return_types:
+ if type == name:
+- return 1
++ return 1
+ return 0
+
+ #
+@@ -471,7 +471,7 @@ def compare_and_save():
+ try:
+ os.system("rm testapi.c; mv testapi.c.new testapi.c")
+ except:
+- os.system("mv testapi.c.new testapi.c")
++ os.system("mv testapi.c.new testapi.c")
+ print("Updated testapi.c")
+ else:
+ print("Generated testapi.c is identical")
+@@ -481,17 +481,17 @@ while line != "":
+ if line == "/* CUT HERE: everything below that line is generated */\n":
+ break;
+ if line[0:15] == "#define gen_nb_":
+- type = string.split(line[15:])[0]
+- known_param_types.append(type)
++ type = line[15:].split()[0]
++ known_param_types.append(type)
+ if line[0:19] == "static void desret_":
+- type = string.split(line[19:], '(')[0]
+- known_return_types.append(type)
++ type = line[19:].split('(')[0]
++ known_return_types.append(type)
+ test.write(line)
+ line = input.readline()
+ input.close()
+
+ if line == "":
+- print "Could not find the CUT marker in testapi.c skipping generation"
++ print("Could not find the CUT marker in testapi.c skipping generation")
+ test.close()
+ sys.exit(0)
+
+@@ -505,7 +505,7 @@ test.write("/* CUT HERE: everything below that line is generated */\n")
+ #
+ doc = libxml2.readFile(srcPref + 'doc/libxml2-api.xml', None, 0)
+ if doc == None:
+- print "Failed to load doc/libxml2-api.xml"
++ print("Failed to load doc/libxml2-api.xml")
+ sys.exit(1)
+ ctxt = doc.xpathNewContext()
+
+@@ -519,9 +519,9 @@ for arg in args:
+ mod = arg.xpathEval('string(../@file)')
+ func = arg.xpathEval('string(../@name)')
+ if (mod not in skipped_modules) and (func not in skipped_functions):
+- type = arg.xpathEval('string(@type)')
+- if not argtypes.has_key(type):
+- argtypes[type] = func
++ type = arg.xpathEval('string(@type)')
++ if type not in argtypes:
++ argtypes[type] = func
+
+ # similarly for return types
+ rettypes = {}
+@@ -531,8 +531,8 @@ for ret in rets:
+ func = ret.xpathEval('string(../@name)')
+ if (mod not in skipped_modules) and (func not in skipped_functions):
+ type = ret.xpathEval('string(@type)')
+- if not rettypes.has_key(type):
+- rettypes[type] = func
++ if type not in rettypes:
++ rettypes[type] = func
+
+ #
+ # Generate constructors and return type handling for all enums
+@@ -549,49 +549,49 @@ for enum in enums:
+ continue;
+ define = 0
+
+- if argtypes.has_key(name) and is_known_param_type(name) == 0:
+- values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
+- i = 0
+- vals = []
+- for value in values:
+- vname = value.xpathEval('string(@name)')
+- if vname == None:
+- continue;
+- i = i + 1
+- if i >= 5:
+- break;
+- vals.append(vname)
+- if vals == []:
+- print "Didn't find any value for enum %s" % (name)
+- continue
+- if modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
+- test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
+- test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
+- (name, name))
+- i = 1
+- for value in vals:
+- test.write(" if (no == %d) return(%s);\n" % (i, value))
+- i = i + 1
+- test.write(""" return(0);
++ if (name in argtypes) and is_known_param_type(name) == 0:
++ values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
++ i = 0
++ vals = []
++ for value in values:
++ vname = value.xpathEval('string(@name)')
++ if vname == None:
++ continue;
++ i = i + 1
++ if i >= 5:
++ break;
++ vals.append(vname)
++ if vals == []:
++ print("Didn't find any value for enum %s" % (name))
++ continue
++ if module in modules_defines:
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
++ test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
++ test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
++ (name, name))
++ i = 1
++ for value in vals:
++ test.write(" if (no == %d) return(%s);\n" % (i, value))
++ i = i + 1
++ test.write(""" return(0);
+ }
+
+ static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
+ }
+
+ """ % (name, name));
+- known_param_types.append(name)
++ known_param_types.append(name)
+
+ if (is_known_return_type(name) == 0) and (name in rettypes):
+- if define == 0 and modules_defines.has_key(module):
+- test.write("#ifdef %s\n" % (modules_defines[module]))
+- define = 1
++ if define == 0 and (module in modules_defines):
++ test.write("#ifdef %s\n" % (modules_defines[module]))
++ define = 1
+ test.write("""static void desret_%s(%s val ATTRIBUTE_UNUSED) {
+ }
+
+ """ % (name, name))
+- known_return_types.append(name)
++ known_return_types.append(name)
+ if define == 1:
+ test.write("#endif\n\n")
+
+@@ -615,9 +615,9 @@ for file in headers:
+ # do not test deprecated APIs
+ #
+ desc = file.xpathEval('string(description)')
+- if string.find(desc, 'DEPRECATED') != -1:
+- print "Skipping deprecated interface %s" % name
+- continue;
++ if desc.find('DEPRECATED') != -1:
++ print("Skipping deprecated interface %s" % name)
++ continue;
+
+ test.write("#include <libxml/%s.h>\n" % name)
+ modules.append(name)
+@@ -679,7 +679,7 @@ def generate_test(module, node):
+ # and store the informations for the generation
+ #
+ try:
+- args = node.xpathEval("arg")
++ args = node.xpathEval("arg")
+ except:
+ args = []
+ t_args = []
+@@ -687,37 +687,37 @@ def generate_test(module, node):
+ for arg in args:
+ n = n + 1
+ rtype = arg.xpathEval("string(@type)")
+- if rtype == 'void':
+- break;
+- info = arg.xpathEval("string(@info)")
+- nam = arg.xpathEval("string(@name)")
++ if rtype == 'void':
++ break;
++ info = arg.xpathEval("string(@info)")
++ nam = arg.xpathEval("string(@name)")
+ type = type_convert(rtype, nam, info, module, name, n)
+- if is_known_param_type(type) == 0:
+- add_missing_type(type, name);
+- no_gen = 1
++ if is_known_param_type(type) == 0:
++ add_missing_type(type, name);
++ no_gen = 1
+ if (type[-3:] == 'Ptr' or type[-4:] == '_ptr') and \
+- rtype[0:6] == 'const ':
+- crtype = rtype[6:]
+- else:
+- crtype = rtype
+- t_args.append((nam, type, rtype, crtype, info))
++ rtype[0:6] == 'const ':
++ crtype = rtype[6:]
++ else:
++ crtype = rtype
++ t_args.append((nam, type, rtype, crtype, info))
+
+ try:
+- rets = node.xpathEval("return")
++ rets = node.xpathEval("return")
+ except:
+ rets = []
+ t_ret = None
+ for ret in rets:
+ rtype = ret.xpathEval("string(@type)")
+- info = ret.xpathEval("string(@info)")
++ info = ret.xpathEval("string(@info)")
+ type = type_convert(rtype, 'return', info, module, name, 0)
+- if rtype == 'void':
+- break
+- if is_known_return_type(type) == 0:
+- add_missing_type(type, name);
+- no_gen = 1
+- t_ret = (type, rtype, info)
+- break
++ if rtype == 'void':
++ break
++ if is_known_return_type(type) == 0:
++ add_missing_type(type, name);
++ no_gen = 1
++ t_ret = (type, rtype, info)
++ break
+
+ if no_gen == 0:
+ for t_arg in t_args:
+@@ -733,7 +733,7 @@ test_%s(void) {
+
+ if no_gen == 1:
+ add_missing_functions(name, module)
+- test.write("""
++ test.write("""
+ /* missing type support */
+ return(test_ret);
+ }
+@@ -742,22 +742,22 @@ test_%s(void) {
+ return
+
+ try:
+- conds = node.xpathEval("cond")
+- for cond in conds:
+- test.write("#if %s\n" % (cond.get_content()))
+- nb_cond = nb_cond + 1
++ conds = node.xpathEval("cond")
++ for cond in conds:
++ test.write("#if %s\n" % (cond.get_content()))
++ nb_cond = nb_cond + 1
+ except:
+ pass
+
+ define = 0
+- if function_defines.has_key(name):
++ if name in function_defines:
+ test.write("#ifdef %s\n" % (function_defines[name]))
+- define = 1
++ define = 1
+
+ # Declare the memory usage counter
+ no_mem = is_skipped_memcheck(name)
+ if no_mem == 0:
+- test.write(" int mem_base;\n");
++ test.write(" int mem_base;\n");
+
+ # Declare the return value
+ if t_ret != None:
+@@ -766,29 +766,29 @@ test_%s(void) {
+ # Declare the arguments
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- # add declaration
+- test.write(" %s %s; /* %s */\n" % (crtype, nam, info))
+- test.write(" int n_%s;\n" % (nam))
++ # add declaration
++ test.write(" %s %s; /* %s */\n" % (crtype, nam, info))
++ test.write(" int n_%s;\n" % (nam))
+ test.write("\n")
+
+ # Cascade loop on of each argument list of values
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- #
+- test.write(" for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
+- nam, nam, type, nam))
++ #
++ test.write(" for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
++ nam, nam, type, nam))
+
+ # log the memory usage
+ if no_mem == 0:
+- test.write(" mem_base = xmlMemBlocks();\n");
++ test.write(" mem_base = xmlMemBlocks();\n");
+
+ # prepare the call
+ i = 0;
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- #
+- test.write(" %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
+- i = i + 1;
++ #
++ test.write(" %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
++ i = i + 1;
+
+ # add checks to avoid out-of-bounds array access
+ i = 0;
+@@ -797,7 +797,7 @@ test_%s(void) {
+ # assume that "size", "len", and "start" parameters apply to either
+ # the nearest preceding or following char pointer
+ if type == "int" and (nam == "size" or nam == "len" or nam == "start"):
+- for j in range(i - 1, -1, -1) + range(i + 1, len(t_args)):
++ for j in (*range(i - 1, -1, -1), *range(i + 1, len(t_args))):
+ (bnam, btype) = t_args[j][:2]
+ if btype == "const_char_ptr" or btype == "const_xmlChar_ptr":
+ test.write(
+@@ -806,42 +806,42 @@ test_%s(void) {
+ " continue;\n"
+ % (bnam, nam, bnam))
+ break
+- i = i + 1;
++ i = i + 1;
+
+ # do the call, and clanup the result
+- if extra_pre_call.has_key(name):
+- test.write(" %s\n"% (extra_pre_call[name]))
++ if name in extra_pre_call:
++ test.write(" %s\n"% (extra_pre_call[name]))
+ if t_ret != None:
+- test.write("\n ret_val = %s(" % (name))
+- need = 0
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg
+- if need:
+- test.write(", ")
+- else:
+- need = 1
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s" % nam);
+- test.write(");\n")
+- if extra_post_call.has_key(name):
+- test.write(" %s\n"% (extra_post_call[name]))
+- test.write(" desret_%s(ret_val);\n" % t_ret[0])
++ test.write("\n ret_val = %s(" % (name))
++ need = 0
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg
++ if need:
++ test.write(", ")
++ else:
++ need = 1
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s" % nam);
++ test.write(");\n")
++ if name in extra_post_call:
++ test.write(" %s\n"% (extra_post_call[name]))
++ test.write(" desret_%s(ret_val);\n" % t_ret[0])
+ else:
+- test.write("\n %s(" % (name));
+- need = 0;
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg;
+- if need:
+- test.write(", ")
+- else:
+- need = 1
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s" % nam)
+- test.write(");\n")
+- if extra_post_call.has_key(name):
+- test.write(" %s\n"% (extra_post_call[name]))
++ test.write("\n %s(" % (name));
++ need = 0;
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg;
++ if need:
++ test.write(", ")
++ else:
++ need = 1
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s" % nam)
++ test.write(");\n")
++ if name in extra_post_call:
++ test.write(" %s\n"% (extra_post_call[name]))
+
+ test.write(" call_tests++;\n");
+
+@@ -849,32 +849,32 @@ test_%s(void) {
+ i = 0;
+ for arg in t_args:
+ (nam, type, rtype, crtype, info) = arg;
+- # This is a hack to prevent generating a destructor for the
+- # 'input' argument in xmlTextReaderSetup. There should be
+- # a better, more generic way to do this!
+- if string.find(info, 'destroy') == -1:
+- test.write(" des_%s(n_%s, " % (type, nam))
+- if rtype != crtype:
+- test.write("(%s)" % rtype)
+- test.write("%s, %d);\n" % (nam, i))
+- i = i + 1;
++ # This is a hack to prevent generating a destructor for the
++ # 'input' argument in xmlTextReaderSetup. There should be
++ # a better, more generic way to do this!
++ if info.find('destroy') == -1:
++ test.write(" des_%s(n_%s, " % (type, nam))
++ if rtype != crtype:
++ test.write("(%s)" % rtype)
++ test.write("%s, %d);\n" % (nam, i))
++ i = i + 1;
+
+ test.write(" xmlResetLastError();\n");
+ # Check the memory usage
+ if no_mem == 0:
+- test.write(""" if (mem_base != xmlMemBlocks()) {
++ test.write(""" if (mem_base != xmlMemBlocks()) {
+ printf("Leak of %%d blocks found in %s",
+- xmlMemBlocks() - mem_base);
+- test_ret++;
++\t xmlMemBlocks() - mem_base);
++\t test_ret++;
+ """ % (name));
+- for arg in t_args:
+- (nam, type, rtype, crtype, info) = arg;
+- test.write(""" printf(" %%d", n_%s);\n""" % (nam))
+- test.write(""" printf("\\n");\n""")
+- test.write(" }\n")
++ for arg in t_args:
++ (nam, type, rtype, crtype, info) = arg;
++ test.write(""" printf(" %%d", n_%s);\n""" % (nam))
++ test.write(""" printf("\\n");\n""")
++ test.write(" }\n")
+
+ for arg in t_args:
+- test.write(" }\n")
++ test.write(" }\n")
+
+ test.write(" function_tests++;\n")
+ #
+@@ -882,7 +882,7 @@ test_%s(void) {
+ #
+ while nb_cond > 0:
+ test.write("#endif\n")
+- nb_cond = nb_cond -1
++ nb_cond = nb_cond -1
+ if define == 1:
+ test.write("#endif\n")
+
+@@ -900,10 +900,10 @@ test_%s(void) {
+ for module in modules:
+ # gather all the functions exported by that module
+ try:
+- functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
++ functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
+ except:
+- print "Failed to gather functions from module %s" % (module)
+- continue;
++ print("Failed to gather functions from module %s" % (module))
++ continue;
+
+ # iterate over all functions in the module generating the test
+ i = 0
+@@ -923,14 +923,14 @@ test_%s(void) {
+ # iterate over all functions in the module generating the call
+ for function in functions:
+ name = function.xpathEval('string(@name)')
+- if is_skipped_function(name):
+- continue
+- test.write(" test_ret += test_%s();\n" % (name))
++ if is_skipped_function(name):
++ continue
++ test.write(" test_ret += test_%s();\n" % (name))
+
+ # footer
+ test.write("""
+ if (test_ret != 0)
+- printf("Module %s: %%d errors\\n", test_ret);
++\tprintf("Module %s: %%d errors\\n", test_ret);
+ return(test_ret);
+ }
+ """ % (module))
+@@ -948,7 +948,7 @@ test.write(""" return(0);
+ }
+ """);
+
+-print "Generated test for %d modules and %d functions" %(len(modules), nb_tests)
++print("Generated test for %d modules and %d functions" %(len(modules), nb_tests))
+
+ compare_and_save()
+
+@@ -960,11 +960,8 @@ for missing in missing_types.keys():
+ n = len(missing_types[missing])
+ missing_list.append((n, missing))
+
+-def compare_missing(a, b):
+- return b[0] - a[0]
+-
+-missing_list.sort(compare_missing)
+-print "Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list))
++missing_list.sort(key=lambda a: a[0])
++print("Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list)))
+ lst = open("missing.lst", "w")
+ lst.write("Missing support for %d types" % (len(missing_list)))
+ lst.write("\n")
+@@ -974,9 +971,9 @@ for miss in missing_list:
+ for n in missing_types[miss[1]]:
+ i = i + 1
+ if i > 5:
+- lst.write(" ...")
+- break
+- lst.write(" %s" % (n))
++ lst.write(" ...")
++ break
++ lst.write(" %s" % (n))
+ lst.write("\n")
+ lst.write("\n")
+ lst.write("\n")
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2016-3709.patch b/meta/recipes-core/libxml/libxml2/CVE-2016-3709.patch
new file mode 100644
index 0000000000..5301d05323
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2016-3709.patch
@@ -0,0 +1,89 @@
+From c1ba6f54d32b707ca6d91cb3257ce9de82876b6f Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 15 Aug 2020 18:32:29 +0200
+Subject: [PATCH] Revert "Do not URI escape in server side includes"
+
+This reverts commit 960f0e275616cadc29671a218d7fb9b69eb35588.
+
+This commit introduced
+
+- an infinite loop, found by OSS-Fuzz, which could be easily fixed.
+- an algorithm with quadratic runtime
+- a security issue, see
+ https://bugzilla.gnome.org/show_bug.cgi?id=769760
+
+A better approach is to add an option not to escape URLs at all
+which libxml2 should have possibly done in the first place.
+
+CVE: CVE-2016-3709
+Upstream-Status: Backport [https://github.com/GNOME/libxml2/commit/c1ba6f54d32b707ca6d91cb3257ce9de82876b6f]
+Signed-off-by: Pawan Badganchi <Pawan.Badganchi@kpit.com>
+---
+ HTMLtree.c | 49 +++++++++++--------------------------------------
+ 1 file changed, 11 insertions(+), 38 deletions(-)
+
+diff --git a/HTMLtree.c b/HTMLtree.c
+index 8d236bb35..cdb7f86a6 100644
+--- a/HTMLtree.c
++++ b/HTMLtree.c
+@@ -706,49 +706,22 @@ htmlAttrDumpOutput(xmlOutputBufferPtr buf, xmlDocPtr doc, xmlAttrPtr cur,
+ (!xmlStrcasecmp(cur->name, BAD_CAST "src")) ||
+ ((!xmlStrcasecmp(cur->name, BAD_CAST "name")) &&
+ (!xmlStrcasecmp(cur->parent->name, BAD_CAST "a"))))) {
++ xmlChar *escaped;
+ xmlChar *tmp = value;
+- /* xmlURIEscapeStr() escapes '"' so it can be safely used. */
+- xmlBufCCat(buf->buffer, "\"");
+
+ while (IS_BLANK_CH(*tmp)) tmp++;
+
+- /* URI Escape everything, except server side includes. */
+- for ( ; ; ) {
+- xmlChar *escaped;
+- xmlChar endChar;
+- xmlChar *end = NULL;
+- xmlChar *start = (xmlChar *)xmlStrstr(tmp, BAD_CAST "<!--");
+- if (start != NULL) {
+- end = (xmlChar *)xmlStrstr(tmp, BAD_CAST "-->");
+- if (end != NULL) {
+- *start = '\0';
+- }
+- }
+-
+- /* Escape the whole string, or until start (set to '\0'). */
+- escaped = xmlURIEscapeStr(tmp, BAD_CAST"@/:=?;#%&,+");
+- if (escaped != NULL) {
+- xmlBufCat(buf->buffer, escaped);
+- xmlFree(escaped);
+- } else {
+- xmlBufCat(buf->buffer, tmp);
+- }
+-
+- if (end == NULL) { /* Everything has been written. */
+- break;
+- }
+-
+- /* Do not escape anything within server side includes. */
+- *start = '<'; /* Restore the first character of "<!--". */
+- end += 3; /* strlen("-->") */
+- endChar = *end;
+- *end = '\0';
+- xmlBufCat(buf->buffer, start);
+- *end = endChar;
+- tmp = end;
++ /*
++ * the < and > have already been escaped at the entity level
++ * And doing so here breaks server side includes
++ */
++ escaped = xmlURIEscapeStr(tmp, BAD_CAST"@/:=?;#%&,+<>");
++ if (escaped != NULL) {
++ xmlBufWriteQuotedString(buf->buffer, escaped);
++ xmlFree(escaped);
++ } else {
++ xmlBufWriteQuotedString(buf->buffer, value);
+ }
+-
+- xmlBufCCat(buf->buffer, "\"");
+ } else {
+ xmlBufWriteQuotedString(buf->buffer, value);
+ }
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2021-3516.patch b/meta/recipes-core/libxml/libxml2/CVE-2021-3516.patch
new file mode 100644
index 0000000000..200f42091e
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2021-3516.patch
@@ -0,0 +1,35 @@
+From 1358d157d0bd83be1dfe356a69213df9fac0b539 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 21 Apr 2021 13:23:27 +0200
+Subject: [PATCH] Fix use-after-free with `xmllint --html --push`
+
+Call htmlCtxtUseOptions to make sure that names aren't stored in
+dictionaries.
+
+Note that this issue only affects xmllint using the HTML push parser.
+
+Fixes #230.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/1358d157d0bd83be1dfe356a69213df9fac0b539]
+CVE: CVE-2021-3516
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ xmllint.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/xmllint.c b/xmllint.c
+index 6ca1bf54d..dbef273a8 100644
+--- a/xmllint.c
++++ b/xmllint.c
+@@ -2213,7 +2213,7 @@ static void parseAndPrintFile(char *filename, xmlParserCtxtPtr rectxt) {
+ if (res > 0) {
+ ctxt = htmlCreatePushParserCtxt(NULL, NULL,
+ chars, res, filename, XML_CHAR_ENCODING_NONE);
+- xmlCtxtUseOptions(ctxt, options);
++ htmlCtxtUseOptions(ctxt, options);
+ while ((res = fread(chars, 1, pushsize, f)) > 0) {
+ htmlParseChunk(ctxt, chars, res, 0);
+ }
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-29824-dependent.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-29824-dependent.patch
new file mode 100644
index 0000000000..63d613cc21
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-29824-dependent.patch
@@ -0,0 +1,53 @@
+From b07251215ef48c70c6e56f7351406c47cfca4d5b Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 10 Jan 2020 15:55:07 +0100
+Subject: [PATCH] Fix integer overflow in xmlBufferResize
+
+Found by OSS-Fuzz.
+
+CVE: CVE-2022-29824
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/b07251215ef48c70c6e56f7351406c47cfca4d5b]
+
+Signed-off-by: Riyaz Ahmed Khan <Riyaz.Khan@kpit.com>
+
+---
+ tree.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/tree.c b/tree.c
+index 0d7fc98c..f43f6de1 100644
+--- a/tree.c
++++ b/tree.c
+@@ -7424,12 +7424,17 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ if (size < buf->size)
+ return 1;
+
++ if (size > UINT_MAX - 10) {
++ xmlTreeErrMemory("growing buffer");
++ return 0;
++ }
++
+ /* figure out new size */
+ switch (buf->alloc){
+ case XML_BUFFER_ALLOC_IO:
+ case XML_BUFFER_ALLOC_DOUBLEIT:
+ /*take care of empty case*/
+- newSize = (buf->size ? buf->size*2 : size + 10);
++ newSize = (buf->size ? buf->size : size + 10);
+ while (size > newSize) {
+ if (newSize > UINT_MAX / 2) {
+ xmlTreeErrMemory("growing buffer");
+@@ -7445,7 +7450,7 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ if (buf->use < BASE_BUFFER_SIZE)
+ newSize = size;
+ else {
+- newSize = buf->size * 2;
++ newSize = buf->size;
+ while (size > newSize) {
+ if (newSize > UINT_MAX / 2) {
+ xmlTreeErrMemory("growing buffer");
+--
+GitLab
+
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-29824.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-29824.patch
new file mode 100644
index 0000000000..ad7b87dbc6
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-29824.patch
@@ -0,0 +1,348 @@
+From 2554a2408e09f13652049e5ffb0d26196b02ebab Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Tue, 8 Mar 2022 20:10:02 +0100
+Subject: [PATCH] [CVE-2022-29824] Fix integer overflows in xmlBuf and
+ xmlBuffer
+
+In several places, the code handling string buffers didn't check for
+integer overflow or used wrong types for buffer sizes. This could
+result in out-of-bounds writes or other memory errors when working on
+large, multi-gigabyte buffers.
+
+Thanks to Felix Wilhelm for the report.
+
+CVE: CVE-2022-29824
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/2554a2408e09f13652049e5ffb0d26196b02ebab]
+
+Signed-off-by: Riyaz Ahmed Khan <Riyaz.Khan@kpit.com>
+
+---
+ buf.c | 86 +++++++++++++++++++++++-----------------------------------
+ tree.c | 72 ++++++++++++++++++------------------------------
+ 2 files changed, 61 insertions(+), 97 deletions(-)
+
+diff --git a/buf.c b/buf.c
+index 24368d37..40a5ee06 100644
+--- a/buf.c
++++ b/buf.c
+@@ -30,6 +30,10 @@
+ #include <libxml/parserInternals.h> /* for XML_MAX_TEXT_LENGTH */
+ #include "buf.h"
+
++#ifndef SIZE_MAX
++#define SIZE_MAX ((size_t) -1)
++#endif
++
+ #define WITH_BUFFER_COMPAT
+
+ /**
+@@ -156,6 +160,8 @@ xmlBufPtr
+ xmlBufCreateSize(size_t size) {
+ xmlBufPtr ret;
+
++ if (size == SIZE_MAX)
++ return(NULL);
+ ret = (xmlBufPtr) xmlMalloc(sizeof(xmlBuf));
+ if (ret == NULL) {
+ xmlBufMemoryError(NULL, "creating buffer");
+@@ -166,8 +172,8 @@ xmlBufCreateSize(size_t size) {
+ ret->error = 0;
+ ret->buffer = NULL;
+ ret->alloc = xmlBufferAllocScheme;
+- ret->size = (size ? size+2 : 0); /* +1 for ending null */
+- ret->compat_size = (int) ret->size;
++ ret->size = (size ? size + 1 : 0); /* +1 for ending null */
++ ret->compat_size = (ret->size > INT_MAX ? INT_MAX : ret->size);
+ if (ret->size){
+ ret->content = (xmlChar *) xmlMallocAtomic(ret->size * sizeof(xmlChar));
+ if (ret->content == NULL) {
+@@ -442,23 +448,17 @@ xmlBufGrowInternal(xmlBufPtr buf, size_t len) {
+ CHECK_COMPAT(buf)
+
+ if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return(0);
+- if (buf->use + len < buf->size)
++ if (len < buf->size - buf->use)
+ return(buf->size - buf->use);
++ if (len > SIZE_MAX - buf->use)
++ return(0);
+
+- /*
+- * Windows has a BIG problem on realloc timing, so we try to double
+- * the buffer size (if that's enough) (bug 146697)
+- * Apparently BSD too, and it's probably best for linux too
+- * On an embedded system this may be something to change
+- */
+-#if 1
+- if (buf->size > (size_t) len)
+- size = buf->size * 2;
+- else
+- size = buf->use + len + 100;
+-#else
+- size = buf->use + len + 100;
+-#endif
++ if (buf->size > (size_t) len) {
++ size = buf->size > SIZE_MAX / 2 ? SIZE_MAX : buf->size * 2;
++ } else {
++ size = buf->use + len;
++ size = size > SIZE_MAX - 100 ? SIZE_MAX : size + 100;
++ }
+
+ if (buf->alloc == XML_BUFFER_ALLOC_BOUNDED) {
+ /*
+@@ -744,7 +744,7 @@ xmlBufIsEmpty(const xmlBufPtr buf)
+ int
+ xmlBufResize(xmlBufPtr buf, size_t size)
+ {
+- unsigned int newSize;
++ size_t newSize;
+ xmlChar* rebuf = NULL;
+ size_t start_buf;
+
+@@ -772,9 +772,13 @@ xmlBufResize(xmlBufPtr buf, size_t size)
+ case XML_BUFFER_ALLOC_IO:
+ case XML_BUFFER_ALLOC_DOUBLEIT:
+ /*take care of empty case*/
+- newSize = (buf->size ? buf->size*2 : size + 10);
++ if (buf->size == 0) {
++ newSize = (size > SIZE_MAX - 10 ? SIZE_MAX : size + 10);
++ } else {
++ newSize = buf->size;
++ }
+ while (size > newSize) {
+- if (newSize > UINT_MAX / 2) {
++ if (newSize > SIZE_MAX / 2) {
+ xmlBufMemoryError(buf, "growing buffer");
+ return 0;
+ }
+@@ -782,15 +786,15 @@ xmlBufResize(xmlBufPtr buf, size_t size)
+ }
+ break;
+ case XML_BUFFER_ALLOC_EXACT:
+- newSize = size+10;
++ newSize = (size > SIZE_MAX - 10 ? SIZE_MAX : size + 10);
+ break;
+ case XML_BUFFER_ALLOC_HYBRID:
+ if (buf->use < BASE_BUFFER_SIZE)
+ newSize = size;
+ else {
+- newSize = buf->size * 2;
++ newSize = buf->size;
+ while (size > newSize) {
+- if (newSize > UINT_MAX / 2) {
++ if (newSize > SIZE_MAX / 2) {
+ xmlBufMemoryError(buf, "growing buffer");
+ return 0;
+ }
+@@ -800,7 +804,7 @@ xmlBufResize(xmlBufPtr buf, size_t size)
+ break;
+
+ default:
+- newSize = size+10;
++ newSize = (size > SIZE_MAX - 10 ? SIZE_MAX : size + 10);
+ break;
+ }
+
+@@ -866,7 +870,7 @@ xmlBufResize(xmlBufPtr buf, size_t size)
+ */
+ int
+ xmlBufAdd(xmlBufPtr buf, const xmlChar *str, int len) {
+- unsigned int needSize;
++ size_t needSize;
+
+ if ((str == NULL) || (buf == NULL) || (buf->error))
+ return -1;
+@@ -888,8 +892,10 @@ xmlBufAdd(xmlBufPtr buf, const xmlChar *str, int len) {
+ if (len < 0) return -1;
+ if (len == 0) return 0;
+
+- needSize = buf->use + len + 2;
+- if (needSize > buf->size){
++ if ((size_t) len >= buf->size - buf->use) {
++ if ((size_t) len >= SIZE_MAX - buf->use)
++ return(-1);
++ needSize = buf->use + len + 1;
+ if (buf->alloc == XML_BUFFER_ALLOC_BOUNDED) {
+ /*
+ * Used to provide parsing limits
+@@ -1025,31 +1031,7 @@ xmlBufCat(xmlBufPtr buf, const xmlChar *str) {
+ */
+ int
+ xmlBufCCat(xmlBufPtr buf, const char *str) {
+- const char *cur;
+-
+- if ((buf == NULL) || (buf->error))
+- return(-1);
+- CHECK_COMPAT(buf)
+- if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return -1;
+- if (str == NULL) {
+-#ifdef DEBUG_BUFFER
+- xmlGenericError(xmlGenericErrorContext,
+- "xmlBufCCat: str == NULL\n");
+-#endif
+- return -1;
+- }
+- for (cur = str;*cur != 0;cur++) {
+- if (buf->use + 10 >= buf->size) {
+- if (!xmlBufResize(buf, buf->use+10)){
+- xmlBufMemoryError(buf, "growing buffer");
+- return XML_ERR_NO_MEMORY;
+- }
+- }
+- buf->content[buf->use++] = *cur;
+- }
+- buf->content[buf->use] = 0;
+- UPDATE_COMPAT(buf)
+- return 0;
++ return xmlBufCat(buf, (const xmlChar *) str);
+ }
+
+ /**
+diff --git a/tree.c b/tree.c
+index 9d94aa42..86afb7d6 100644
+--- a/tree.c
++++ b/tree.c
+@@ -7104,6 +7104,8 @@ xmlBufferPtr
+ xmlBufferCreateSize(size_t size) {
+ xmlBufferPtr ret;
+
++ if (size >= UINT_MAX)
++ return(NULL);
+ ret = (xmlBufferPtr) xmlMalloc(sizeof(xmlBuffer));
+ if (ret == NULL) {
+ xmlTreeErrMemory("creating buffer");
+@@ -7111,7 +7113,7 @@ xmlBufferCreateSize(size_t size) {
+ }
+ ret->use = 0;
+ ret->alloc = xmlBufferAllocScheme;
+- ret->size = (size ? size+2 : 0); /* +1 for ending null */
++ ret->size = (size ? size + 1 : 0); /* +1 for ending null */
+ if (ret->size){
+ ret->content = (xmlChar *) xmlMallocAtomic(ret->size * sizeof(xmlChar));
+ if (ret->content == NULL) {
+@@ -7171,6 +7173,8 @@ xmlBufferCreateStatic(void *mem, size_t size) {
+
+ if ((mem == NULL) || (size == 0))
+ return(NULL);
++ if (size > UINT_MAX)
++ return(NULL);
+
+ ret = (xmlBufferPtr) xmlMalloc(sizeof(xmlBuffer));
+ if (ret == NULL) {
+@@ -7318,28 +7322,23 @@ xmlBufferShrink(xmlBufferPtr buf, unsigned int len) {
+ */
+ int
+ xmlBufferGrow(xmlBufferPtr buf, unsigned int len) {
+- int size;
++ unsigned int size;
+ xmlChar *newbuf;
+
+ if (buf == NULL) return(-1);
+
+ if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return(0);
+- if (len + buf->use < buf->size) return(0);
++ if (len < buf->size - buf->use)
++ return(0);
++ if (len > UINT_MAX - buf->use)
++ return(-1);
+
+- /*
+- * Windows has a BIG problem on realloc timing, so we try to double
+- * the buffer size (if that's enough) (bug 146697)
+- * Apparently BSD too, and it's probably best for linux too
+- * On an embedded system this may be something to change
+- */
+-#if 1
+- if (buf->size > len)
+- size = buf->size * 2;
+- else
+- size = buf->use + len + 100;
+-#else
+- size = buf->use + len + 100;
+-#endif
++ if (buf->size > (size_t) len) {
++ size = buf->size > UINT_MAX / 2 ? UINT_MAX : buf->size * 2;
++ } else {
++ size = buf->use + len;
++ size = size > UINT_MAX - 100 ? UINT_MAX : size + 100;
++ }
+
+ if ((buf->alloc == XML_BUFFER_ALLOC_IO) && (buf->contentIO != NULL)) {
+ size_t start_buf = buf->content - buf->contentIO;
+@@ -7466,7 +7465,10 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ case XML_BUFFER_ALLOC_IO:
+ case XML_BUFFER_ALLOC_DOUBLEIT:
+ /*take care of empty case*/
+- newSize = (buf->size ? buf->size : size + 10);
++ if (buf->size == 0)
++ newSize = (size > UINT_MAX - 10 ? UINT_MAX : size + 10);
++ else
++ newSize = buf->size;
+ while (size > newSize) {
+ if (newSize > UINT_MAX / 2) {
+ xmlTreeErrMemory("growing buffer");
+@@ -7476,7 +7478,7 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ }
+ break;
+ case XML_BUFFER_ALLOC_EXACT:
+- newSize = size+10;
++ newSize = (size > UINT_MAX - 10 ? UINT_MAX : size + 10);;
+ break;
+ case XML_BUFFER_ALLOC_HYBRID:
+ if (buf->use < BASE_BUFFER_SIZE)
+@@ -7494,7 +7496,7 @@ xmlBufferResize(xmlBufferPtr buf, unsigned int size)
+ break;
+
+ default:
+- newSize = size+10;
++ newSize = (size > UINT_MAX - 10 ? UINT_MAX : size + 10);;
+ break;
+ }
+
+@@ -7580,8 +7582,10 @@ xmlBufferAdd(xmlBufferPtr buf, const xmlChar *str, int len) {
+ if (len < 0) return -1;
+ if (len == 0) return 0;
+
+- needSize = buf->use + len + 2;
+- if (needSize > buf->size){
++ if ((unsigned) len >= buf->size - buf->use) {
++ if ((unsigned) len >= UINT_MAX - buf->use)
++ return XML_ERR_NO_MEMORY;
++ needSize = buf->use + len + 1;
+ if (!xmlBufferResize(buf, needSize)){
+ xmlTreeErrMemory("growing buffer");
+ return XML_ERR_NO_MEMORY;
+@@ -7694,29 +7698,7 @@ xmlBufferCat(xmlBufferPtr buf, const xmlChar *str) {
+ */
+ int
+ xmlBufferCCat(xmlBufferPtr buf, const char *str) {
+- const char *cur;
+-
+- if (buf == NULL)
+- return(-1);
+- if (buf->alloc == XML_BUFFER_ALLOC_IMMUTABLE) return -1;
+- if (str == NULL) {
+-#ifdef DEBUG_BUFFER
+- xmlGenericError(xmlGenericErrorContext,
+- "xmlBufferCCat: str == NULL\n");
+-#endif
+- return -1;
+- }
+- for (cur = str;*cur != 0;cur++) {
+- if (buf->use + 10 >= buf->size) {
+- if (!xmlBufferResize(buf, buf->use+10)){
+- xmlTreeErrMemory("growing buffer");
+- return XML_ERR_NO_MEMORY;
+- }
+- }
+- buf->content[buf->use++] = *cur;
+- }
+- buf->content[buf->use] = 0;
+- return 0;
++ return xmlBufferCat(buf, (const xmlChar *) str);
+ }
+
+ /**
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch
new file mode 100644
index 0000000000..bdb9e9eb7a
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-40303.patch
@@ -0,0 +1,623 @@
+From c846986356fc149915a74972bf198abc266bc2c0 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Thu, 25 Aug 2022 17:43:08 +0200
+Subject: [PATCH] [CVE-2022-40303] Fix integer overflows with XML_PARSE_HUGE
+
+Also impose size limits when XML_PARSE_HUGE is set. Limit size of names
+to XML_MAX_TEXT_LENGTH (10 million bytes) and other content to
+XML_MAX_HUGE_LENGTH (1 billion bytes).
+
+Move some the length checks to the end of the respective loop to make
+them strict.
+
+xmlParseEntityValue didn't have a length limitation at all. But without
+XML_PARSE_HUGE, this should eventually trigger an error in xmlGROW.
+
+Thanks to Maddie Stone working with Google Project Zero for the report!
+
+CVE: CVE-2022-40303
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/c846986356fc149915a74972bf198abc266bc2c0]
+Comments: Refreshed hunk
+
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ parser.c | 233 +++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 121 insertions(+), 112 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index 93f031be..79479979 100644
+--- a/parser.c
++++ b/parser.c
+@@ -102,6 +102,8 @@ xmlParseElementEnd(xmlParserCtxtPtr ctxt);
+ * *
+ ************************************************************************/
+
++#define XML_MAX_HUGE_LENGTH 1000000000
++
+ #define XML_PARSER_BIG_ENTITY 1000
+ #define XML_PARSER_LOT_ENTITY 5000
+
+@@ -552,7 +554,7 @@ xmlFatalErr(xmlParserCtxtPtr ctxt, xmlParserErrors error, const char *info)
+ errmsg = "Malformed declaration expecting version";
+ break;
+ case XML_ERR_NAME_TOO_LONG:
+- errmsg = "Name too long use XML_PARSE_HUGE option";
++ errmsg = "Name too long";
+ break;
+ #if 0
+ case:
+@@ -3202,6 +3204,9 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNameComplex++;
+@@ -3267,7 +3272,8 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ }
+@@ -3293,13 +3299,13 @@ xmlParseNameComplex(xmlParserCtxtPtr ctxt) {
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Name");
+ return(NULL);
+ }
+@@ -3338,7 +3344,10 @@ const xmlChar *
+ xmlParseName(xmlParserCtxtPtr ctxt) {
+ const xmlChar *in;
+ const xmlChar *ret;
+- int count = 0;
++ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ GROW;
+
+@@ -3362,8 +3371,7 @@ xmlParseName(xmlParserCtxtPtr ctxt) {
+ in++;
+ if ((*in > 0) && (*in < 0x80)) {
+ count = in - ctxt->input->cur;
+- if ((count > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (count > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Name");
+ return(NULL);
+ }
+@@ -3384,6 +3392,9 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ size_t startPosition = 0;
+
+ #ifdef DEBUG
+@@ -3404,17 +3415,13 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ while ((c != ' ') && (c != '>') && (c != '/') && /* test bigname.xml */
+ (xmlIsNameChar(ctxt, c) && (c != ':'))) {
+ if (count++ > XML_PARSER_CHUNK_SIZE) {
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+- return(NULL);
+- }
+ count = 0;
+ GROW;
+ if (ctxt->instate == XML_PARSER_EOF)
+ return(NULL);
+ }
+- len += l;
++ if (len <= INT_MAX - l)
++ len += l;
+ NEXTL(l);
+ c = CUR_CHAR(l);
+ if (c == 0) {
+@@ -3432,8 +3439,7 @@ xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
+ c = CUR_CHAR(l);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3459,7 +3465,10 @@ static const xmlChar *
+ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+ const xmlChar *in, *e;
+ const xmlChar *ret;
+- int count = 0;
++ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNCName++;
+@@ -3484,8 +3493,7 @@ xmlParseNCName(xmlParserCtxtPtr ctxt) {
+ goto complex;
+ if ((*in > 0) && (*in < 0x80)) {
+ count = in - ctxt->input->cur;
+- if ((count > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (count > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3567,6 +3575,9 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ const xmlChar *cur = *str;
+ int len = 0, l;
+ int c;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseStringName++;
+@@ -3602,12 +3613,6 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ if (len + 10 > max) {
+ xmlChar *tmp;
+
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+- xmlFree(buffer);
+- return(NULL);
+- }
+ max *= 2;
+ tmp = (xmlChar *) xmlRealloc(buffer,
+ max * sizeof(xmlChar));
+@@ -3621,14 +3626,18 @@ xmlParseStringName(xmlParserCtxtPtr ctxt, const xmlChar** str) {
+ COPY_BUF(l,buffer,len,c);
+ cur += l;
+ c = CUR_SCHAR(cur, l);
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
++ xmlFree(buffer);
++ return(NULL);
++ }
+ }
+ buffer[len] = 0;
+ *str = cur;
+ return(buffer);
+ }
+ }
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NCName");
+ return(NULL);
+ }
+@@ -3655,6 +3664,9 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ int len = 0, l;
+ int c;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+
+ #ifdef DEBUG
+ nbParseNmToken++;
+@@ -3706,12 +3718,6 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ if (len + 10 > max) {
+ xmlChar *tmp;
+
+- if ((max > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
+- xmlFree(buffer);
+- return(NULL);
+- }
+ max *= 2;
+ tmp = (xmlChar *) xmlRealloc(buffer,
+ max * sizeof(xmlChar));
+@@ -3725,6 +3731,11 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ COPY_BUF(l,buffer,len,c);
+ NEXTL(l);
+ c = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
++ xmlFree(buffer);
++ return(NULL);
++ }
+ }
+ buffer[len] = 0;
+ return(buffer);
+@@ -3732,8 +3743,7 @@ xmlParseNmtoken(xmlParserCtxtPtr ctxt) {
+ }
+ if (len == 0)
+ return(NULL);
+- if ((len > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "NmToken");
+ return(NULL);
+ }
+@@ -3759,6 +3769,9 @@ xmlParseEntityValue(xmlParserCtxtPtr ctxt, xmlChar **orig) {
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
+ int c, l;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ xmlChar stop;
+ xmlChar *ret = NULL;
+ const xmlChar *cur = NULL;
+@@ -3818,6 +3831,12 @@ xmlParseEntityValue(xmlParserCtxtPtr ctxt, xmlChar **orig) {
+ GROW;
+ c = CUR_CHAR(l);
+ }
++
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_ENTITY_NOT_FINISHED,
++ "entity value too long\n");
++ goto error;
++ }
+ }
+ buf[len] = 0;
+ if (ctxt->instate == XML_PARSER_EOF)
+@@ -3905,6 +3924,9 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ xmlChar *rep = NULL;
+ size_t len = 0;
+ size_t buf_size = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int c, l, in_space = 0;
+ xmlChar *current = NULL;
+ xmlEntityPtr ent;
+@@ -3925,16 +3925,6 @@
+ while (((NXT(0) != limit) && /* checked */
+ (IS_CHAR(c)) && (c != '<')) &&
+ (ctxt->instate != XML_PARSER_EOF)) {
+- /*
+- * Impose a reasonable limit on attribute size, unless XML_PARSE_HUGE
+- * special option is given
+- */
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+- "AttValue length too long\n");
+- goto mem_error;
+- }
+ if (c == 0) break;
+ if (c == '&') {
+ in_space = 0;
+@@ -4093,6 +4105,11 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ }
+ GROW;
+ c = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
++ "AttValue length too long\n");
++ goto mem_error;
++ }
+ }
+ if (ctxt->instate == XML_PARSER_EOF)
+ goto error;
+@@ -4114,16 +4131,6 @@ xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) {
+ } else
+ NEXT;
+
+- /*
+- * There we potentially risk an overflow, don't allow attribute value of
+- * length more than INT_MAX it is a very reasonable assumption !
+- */
+- if (len >= INT_MAX) {
+- xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+- "AttValue length too long\n");
+- goto mem_error;
+- }
+-
+ if (attlen != NULL) *attlen = (int) len;
+ return(buf);
+
+@@ -4194,6 +4201,9 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
+ int cur, l;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ xmlChar stop;
+ int state = ctxt->instate;
+ int count = 0;
+@@ -4221,13 +4231,6 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ if (len + 5 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "SystemLiteral");
+- xmlFree(buf);
+- ctxt->instate = (xmlParserInputState) state;
+- return(NULL);
+- }
+ size *= 2;
+ tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
+ if (tmp == NULL) {
+@@ -4256,6 +4259,12 @@ xmlParseSystemLiteral(xmlParserCtxtPtr ctxt) {
+ SHRINK;
+ cur = CUR_CHAR(l);
+ }
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "SystemLiteral");
++ xmlFree(buf);
++ ctxt->instate = (xmlParserInputState) state;
++ return(NULL);
++ }
+ }
+ buf[len] = 0;
+ ctxt->instate = (xmlParserInputState) state;
+@@ -4283,6 +4292,9 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ int len = 0;
+ int size = XML_PARSER_BUFFER_SIZE;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_TEXT_LENGTH :
++ XML_MAX_NAME_LENGTH;
+ xmlChar cur;
+ xmlChar stop;
+ int count = 0;
+@@ -4310,12 +4322,6 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ if (len + 1 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_NAME_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Public ID");
+- xmlFree(buf);
+- return(NULL);
+- }
+ size *= 2;
+ tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
+ if (tmp == NULL) {
+@@ -4343,6 +4349,11 @@ xmlParsePubidLiteral(xmlParserCtxtPtr ctxt) {
+ SHRINK;
+ cur = CUR;
+ }
++ if (len > maxLength) {
++ xmlFatalErr(ctxt, XML_ERR_NAME_TOO_LONG, "Public ID");
++ xmlFree(buf);
++ return(NULL);
++ }
+ }
+ buf[len] = 0;
+ if (cur != stop) {
+@@ -4742,6 +4753,9 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ int r, rl;
+ int cur, l;
+ size_t count = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int inputid;
+
+ inputid = ctxt->input->id;
+@@ -4787,13 +4801,6 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ if ((r == '-') && (q == '-')) {
+ xmlFatalErr(ctxt, XML_ERR_HYPHEN_IN_COMMENT, NULL);
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
+- "Comment too big found", NULL);
+- xmlFree (buf);
+- return;
+- }
+ if (len + 5 >= size) {
+ xmlChar *new_buf;
+ size_t new_size;
+@@ -4831,6 +4838,13 @@ xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf,
+ GROW;
+ cur = CUR_CHAR(l);
+ }
++
++ if (len > maxLength) {
++ xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
++ "Comment too big found", NULL);
++ xmlFree (buf);
++ return;
++ }
+ }
+ buf[len] = 0;
+ if (cur == 0) {
+@@ -4875,6 +4889,9 @@ xmlParseComment(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ size_t size = XML_PARSER_BUFFER_SIZE;
+ size_t len = 0;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ xmlParserInputState state;
+ const xmlChar *in;
+ size_t nbchar = 0;
+@@ -4958,8 +4975,7 @@ get_more:
+ buf[len] = 0;
+ }
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if (len > maxLength) {
+ xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED,
+ "Comment too big found", NULL);
+ xmlFree (buf);
+@@ -5159,6 +5175,9 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ xmlChar *buf = NULL;
+ size_t len = 0;
+ size_t size = XML_PARSER_BUFFER_SIZE;
++ size_t maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+ int cur, l;
+ const xmlChar *target;
+ xmlParserInputState state;
+@@ -5234,14 +5253,6 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ return;
+ }
+ count = 0;
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+- "PI %s too big found", target);
+- xmlFree(buf);
+- ctxt->instate = state;
+- return;
+- }
+ }
+ COPY_BUF(l,buf,len,cur);
+ NEXTL(l);
+@@ -5251,15 +5262,14 @@ xmlParsePI(xmlParserCtxtPtr ctxt) {
+ GROW;
+ cur = CUR_CHAR(l);
+ }
++ if (len > maxLength) {
++ xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
++ "PI %s too big found", target);
++ xmlFree(buf);
++ ctxt->instate = state;
++ return;
++ }
+ }
+- if ((len > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+- "PI %s too big found", target);
+- xmlFree(buf);
+- ctxt->instate = state;
+- return;
+- }
+ buf[len] = 0;
+ if (cur != '?') {
+ xmlFatalErrMsgStr(ctxt, XML_ERR_PI_NOT_FINISHED,
+@@ -8954,6 +8964,9 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ const xmlChar *in = NULL, *start, *end, *last;
+ xmlChar *ret = NULL;
+ int line, col;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+
+ GROW;
+ in = (xmlChar *) CUR_PTR;
+@@ -8993,8 +9006,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ start = in;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9007,8 +9019,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ if ((*in++ == 0x20) && (*in == 0x20)) break;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9041,16 +9052,14 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ last = last + delta;
+ }
+ end = ctxt->input->end;
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+ }
+ }
+ }
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9063,8 +9072,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ col++;
+ if (in >= end) {
+ GROW_PARSE_ATT_VALUE_INTERNAL(ctxt, in, start, end)
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9072,8 +9080,7 @@ xmlParseAttValueInternal(xmlParserCtxtPtr ctxt, int *len, int *alloc,
+ }
+ }
+ last = in;
+- if (((in - start) > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
++ if ((in - start) > maxLength) {
+ xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED,
+ "AttValue length too long\n");
+ return(NULL);
+@@ -9763,6 +9770,9 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ int s, sl;
+ int cur, l;
+ int count = 0;
++ int maxLength = (ctxt->options & XML_PARSE_HUGE) ?
++ XML_MAX_HUGE_LENGTH :
++ XML_MAX_TEXT_LENGTH;
+
+ /* Check 2.6.0 was NXT(0) not RAW */
+ if (CMP9(CUR_PTR, '<', '!', '[', 'C', 'D', 'A', 'T', 'A', '[')) {
+@@ -9796,13 +9806,6 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ if (len + 5 >= size) {
+ xmlChar *tmp;
+
+- if ((size > XML_MAX_TEXT_LENGTH) &&
+- ((ctxt->options & XML_PARSE_HUGE) == 0)) {
+- xmlFatalErrMsgStr(ctxt, XML_ERR_CDATA_NOT_FINISHED,
+- "CData section too big found", NULL);
+- xmlFree (buf);
+- return;
+- }
+ tmp = (xmlChar *) xmlRealloc(buf, size * 2 * sizeof(xmlChar));
+ if (tmp == NULL) {
+ xmlFree(buf);
+@@ -9829,6 +9832,12 @@ xmlParseCDSect(xmlParserCtxtPtr ctxt) {
+ }
+ NEXTL(l);
+ cur = CUR_CHAR(l);
++ if (len > maxLength) {
++ xmlFatalErrMsg(ctxt, XML_ERR_CDATA_NOT_FINISHED,
++ "CData section too big found\n");
++ xmlFree(buf);
++ return;
++ }
+ }
+ buf[len] = 0;
+ ctxt->instate = XML_PARSER_CONTENT;
+--
+GitLab
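
The hunks above all apply one pattern: compute a single maxLength up front (XML_MAX_NAME_LENGTH, XML_MAX_TEXT_LENGTH or XML_MAX_HUGE_LENGTH, depending on XML_PARSE_HUGE) and compare against it once per token, instead of repeating the XML_PARSE_HUGE check inside the realloc paths. The effect of these limits can be observed from Python through lxml, whose XMLParser(huge_tree=True) option maps to XML_PARSE_HUGE. A rough sketch, assuming lxml is available and using libxml2's documented XML_MAX_TEXT_LENGTH of 10,000,000 bytes:

    # Attribute values longer than XML_MAX_TEXT_LENGTH are expected to be
    # rejected unless XML_PARSE_HUGE (lxml's huge_tree) is enabled.
    from lxml import etree

    doc = ('<r a="' + 'x' * 10_000_001 + '"/>').encode()

    try:
        etree.fromstring(doc, etree.XMLParser())                   # limits enforced
    except etree.XMLSyntaxError as err:
        print("rejected:", err)

    root = etree.fromstring(doc, etree.XMLParser(huge_tree=True))  # limits lifted
    print("parsed, attribute length:", len(root.get("a")))
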
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch b/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch
new file mode 100644
index 0000000000..c19726fe9f
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2022-40304.patch
@@ -0,0 +1,104 @@
+From 1b41ec4e9433b05bb0376be4725804c54ef1d80b Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 31 Aug 2022 22:11:25 +0200
+Subject: [PATCH] [CVE-2022-40304] Fix dict corruption caused by entity
+ reference cycles
+
+When an entity reference cycle is detected, the entity content is
+cleared by setting its first byte to zero. But the entity content might
+be allocated from a dict. In this case, the dict entry becomes corrupted
+leading to all kinds of logic errors, including memory errors like
+double-frees.
+
+Stop storing entity content, orig, ExternalID and SystemID in a dict.
+These values are unlikely to occur multiple times in a document, so they
+shouldn't have been stored in a dict in the first place.
+
+Thanks to Ned Williamson and Nathan Wachholz working with Google Project
+Zero for the report!
+
+CVE: CVE-2022-40304
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/1b41ec4e9433b05bb0376be4725804c54ef1d80b]
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ entities.c | 55 ++++++++++++++++--------------------------------------
+ 1 file changed, 16 insertions(+), 39 deletions(-)
+
+diff --git a/entities.c b/entities.c
+index 84435515..d4e5412e 100644
+--- a/entities.c
++++ b/entities.c
+@@ -128,36 +128,19 @@ xmlFreeEntity(xmlEntityPtr entity)
+ if ((entity->children) && (entity->owner == 1) &&
+ (entity == (xmlEntityPtr) entity->children->parent))
+ xmlFreeNodeList(entity->children);
+- if (dict != NULL) {
+- if ((entity->name != NULL) && (!xmlDictOwns(dict, entity->name)))
+- xmlFree((char *) entity->name);
+- if ((entity->ExternalID != NULL) &&
+- (!xmlDictOwns(dict, entity->ExternalID)))
+- xmlFree((char *) entity->ExternalID);
+- if ((entity->SystemID != NULL) &&
+- (!xmlDictOwns(dict, entity->SystemID)))
+- xmlFree((char *) entity->SystemID);
+- if ((entity->URI != NULL) && (!xmlDictOwns(dict, entity->URI)))
+- xmlFree((char *) entity->URI);
+- if ((entity->content != NULL)
+- && (!xmlDictOwns(dict, entity->content)))
+- xmlFree((char *) entity->content);
+- if ((entity->orig != NULL) && (!xmlDictOwns(dict, entity->orig)))
+- xmlFree((char *) entity->orig);
+- } else {
+- if (entity->name != NULL)
+- xmlFree((char *) entity->name);
+- if (entity->ExternalID != NULL)
+- xmlFree((char *) entity->ExternalID);
+- if (entity->SystemID != NULL)
+- xmlFree((char *) entity->SystemID);
+- if (entity->URI != NULL)
+- xmlFree((char *) entity->URI);
+- if (entity->content != NULL)
+- xmlFree((char *) entity->content);
+- if (entity->orig != NULL)
+- xmlFree((char *) entity->orig);
+- }
++ if ((entity->name != NULL) &&
++ ((dict == NULL) || (!xmlDictOwns(dict, entity->name))))
++ xmlFree((char *) entity->name);
++ if (entity->ExternalID != NULL)
++ xmlFree((char *) entity->ExternalID);
++ if (entity->SystemID != NULL)
++ xmlFree((char *) entity->SystemID);
++ if (entity->URI != NULL)
++ xmlFree((char *) entity->URI);
++ if (entity->content != NULL)
++ xmlFree((char *) entity->content);
++ if (entity->orig != NULL)
++ xmlFree((char *) entity->orig);
+ xmlFree(entity);
+ }
+
+@@ -193,18 +176,12 @@ xmlCreateEntity(xmlDictPtr dict, const xmlChar *name, int type,
+ ret->SystemID = xmlStrdup(SystemID);
+ } else {
+ ret->name = xmlDictLookup(dict, name, -1);
+- if (ExternalID != NULL)
+- ret->ExternalID = xmlDictLookup(dict, ExternalID, -1);
+- if (SystemID != NULL)
+- ret->SystemID = xmlDictLookup(dict, SystemID, -1);
++ ret->ExternalID = xmlStrdup(ExternalID);
++ ret->SystemID = xmlStrdup(SystemID);
+ }
+ if (content != NULL) {
+ ret->length = xmlStrlen(content);
+- if ((dict != NULL) && (ret->length < 5))
+- ret->content = (xmlChar *)
+- xmlDictLookup(dict, content, ret->length);
+- else
+- ret->content = xmlStrndup(content, ret->length);
++ ret->content = xmlStrndup(content, ret->length);
+ } else {
+ ret->length = 0;
+ ret->content = NULL;
+--
+GitLab
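
An "entity reference cycle" as described in the commit message above is a document whose entities expand into each other; libxml2 detects the loop and aborts the parse, and the fix ensures that this error path no longer clears content that may be owned by the parser dict. A hedged illustration from Python via lxml (which links libxml2 and substitutes entities by default), expected to fail cleanly rather than crash:

    # Minimal cyclic entity reference of the kind the fix is about.
    from lxml import etree

    doc = b"""<!DOCTYPE d [
      <!ENTITY a "&b;">
      <!ENTITY b "&a;">
    ]>
    <d>&a;</d>"""

    try:
        etree.fromstring(doc)
    except etree.XMLSyntaxError as err:
        print("parse failed as expected:", err)
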
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch
new file mode 100644
index 0000000000..907f2c4d47
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-28484.patch
@@ -0,0 +1,79 @@
+From e4f85f1bd2eb34d9b49da9154a4cc3a1bc284f68 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 7 Apr 2023 11:46:35 +0200
+Subject: [PATCH] [CVE-2023-28484] Fix null deref in xmlSchemaFixupComplexType
+
+Fix a null pointer dereference when parsing (invalid) XML schemas.
+
+Thanks to Robby Simpson for the report!
+
+Fixes #491.
+
+CVE: CVE-2023-28484
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/e4f85f1bd2eb34d9b49da9154a4cc3a1bc284f68]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ result/schemas/issue491_0_0.err | 1 +
+ test/schemas/issue491_0.xml | 1 +
+ test/schemas/issue491_0.xsd | 18 ++++++++++++++++++
+ xmlschemas.c | 2 +-
+ 4 files changed, 21 insertions(+), 1 deletion(-)
+ create mode 100644 result/schemas/issue491_0_0.err
+ create mode 100644 test/schemas/issue491_0.xml
+ create mode 100644 test/schemas/issue491_0.xsd
+
+diff --git a/result/schemas/issue491_0_0.err b/result/schemas/issue491_0_0.err
+new file mode 100644
+index 00000000..9b2bb969
+--- /dev/null
++++ b/result/schemas/issue491_0_0.err
+@@ -0,0 +1 @@
++./test/schemas/issue491_0.xsd:8: element complexType: Schemas parser error : complex type 'ChildType': The content type of both, the type and its base type, must either 'mixed' or 'element-only'.
+diff --git a/test/schemas/issue491_0.xml b/test/schemas/issue491_0.xml
+new file mode 100644
+index 00000000..e2b2fc2e
+--- /dev/null
++++ b/test/schemas/issue491_0.xml
+@@ -0,0 +1 @@
++<Child xmlns="http://www.test.com">5</Child>
+diff --git a/test/schemas/issue491_0.xsd b/test/schemas/issue491_0.xsd
+new file mode 100644
+index 00000000..81702649
+--- /dev/null
++++ b/test/schemas/issue491_0.xsd
+@@ -0,0 +1,18 @@
++<?xml version='1.0' encoding='UTF-8'?>
++<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns="http://www.test.com" targetNamespace="http://www.test.com" elementFormDefault="qualified" attributeFormDefault="unqualified">
++ <xs:complexType name="BaseType">
++ <xs:simpleContent>
++ <xs:extension base="xs:int" />
++ </xs:simpleContent>
++ </xs:complexType>
++ <xs:complexType name="ChildType">
++ <xs:complexContent>
++ <xs:extension base="BaseType">
++ <xs:sequence>
++ <xs:element name="bad" type="xs:int" minOccurs="0" maxOccurs="1"/>
++ </xs:sequence>
++ </xs:extension>
++ </xs:complexContent>
++ </xs:complexType>
++ <xs:element name="Child" type="ChildType" />
++</xs:schema>
+diff --git a/xmlschemas.c b/xmlschemas.c
+index 6a353858..a4eaf591 100644
+--- a/xmlschemas.c
++++ b/xmlschemas.c
+@@ -18632,7 +18632,7 @@ xmlSchemaFixupComplexType(xmlSchemaParserCtxtPtr pctxt,
+ "allowed to appear inside other model groups",
+ NULL, NULL);
+
+- } else if (! dummySequence) {
++ } else if ((!dummySequence) && (baseType->subtypes != NULL)) {
+ xmlSchemaTreeItemPtr effectiveContent =
+ (xmlSchemaTreeItemPtr) type->subtypes;
+ /*
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch
new file mode 100644
index 0000000000..1252668577
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-29469.patch
@@ -0,0 +1,42 @@
+From 547edbf1cbdccd46b2e8ff322a456eaa5931c5df Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 7 Apr 2023 11:49:27 +0200
+Subject: [PATCH] [CVE-2023-29469] Hashing of empty dict strings isn't
+ deterministic
+
+When hashing empty strings which aren't null-terminated,
+xmlDictComputeFastKey could produce inconsistent results. This could
+lead to various logic or memory errors, including double frees.
+
+For consistency the seed is also taken into account, but this shouldn't
+have an impact on security.
+
+Found by OSS-Fuzz.
+
+Fixes #510.
+
+CVE: CVE-2023-29469
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/547edbf1cbdccd46b2e8ff322a456eaa5931c5df]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ dict.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/dict.c b/dict.c
+index 86c3f6d7..d7fd1a06 100644
+--- a/dict.c
++++ b/dict.c
+@@ -451,7 +451,8 @@ static unsigned long
+ xmlDictComputeFastKey(const xmlChar *name, int namelen, int seed) {
+ unsigned long value = seed;
+
+- if (name == NULL) return(0);
++ if ((name == NULL) || (namelen <= 0))
++ return(value);
+ value = *name;
+ value <<= 5;
+ if (namelen > 10) {
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch
new file mode 100644
index 0000000000..9689cec67d
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0001.patch
@@ -0,0 +1,36 @@
+From d0c3f01e110d54415611c5fa0040cdf4a56053f9 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 6 May 2023 17:47:37 +0200
+Subject: [PATCH] parser: Fix old SAX1 parser with custom callbacks
+
+For some reason, xmlCtxtUseOptionsInternal set the start and end element
+SAX handlers to the internal DOM builder functions when XML_PARSE_SAX1
+was specified. This means that custom SAX handlers could never work with
+that flag because these functions would receive the wrong user data
+argument and crash immediately.
+
+Fixes #535.
+
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/libxml2/-/commit/d0c3f01e110d54415611c5fa0040cdf4a56053f9]
+CVE: CVE-2023-39615
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ parser.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index 6e09208..7814e6e 100644
+--- a/parser.c
++++ b/parser.c
+@@ -15156,8 +15156,6 @@ xmlCtxtUseOptionsInternal(xmlParserCtxtPtr ctxt, int options, const char *encodi
+ }
+ #ifdef LIBXML_SAX1_ENABLED
+ if (options & XML_PARSE_SAX1) {
+- ctxt->sax->startElement = xmlSAX2StartElement;
+- ctxt->sax->endElement = xmlSAX2EndElement;
+ ctxt->sax->startElementNs = NULL;
+ ctxt->sax->endElementNs = NULL;
+ ctxt->sax->initialized = 1;
+--
+2.24.4
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch
new file mode 100644
index 0000000000..ebd9868fac
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-0002.patch
@@ -0,0 +1,71 @@
+From 235b15a590eecf97b09e87bdb7e4f8333e9de129 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Mon, 8 May 2023 17:58:02 +0200
+Subject: [PATCH] SAX: Always initialize SAX1 element handlers
+
+Follow-up to commit d0c3f01e. A parser context will be initialized to
+SAX version 2, but this can be overridden with XML_PARSE_SAX1 later,
+so we must initialize the SAX1 element handlers as well.
+
+Change the check in xmlDetectSAX2 to only look for XML_SAX2_MAGIC, so
+we don't switch to SAX1 if the SAX2 element handlers are NULL.
+
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/libxml2/-/commit/235b15a590eecf97b09e87bdb7e4f8333e9de129]
+CVE: CVE-2023-39615
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ SAX2.c | 11 +++++++----
+ parser.c | 5 +----
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/SAX2.c b/SAX2.c
+index 5f141f9..902d34d 100644
+--- a/SAX2.c
++++ b/SAX2.c
+@@ -2869,20 +2869,23 @@ xmlSAXVersion(xmlSAXHandler *hdlr, int version)
+ {
+ if (hdlr == NULL) return(-1);
+ if (version == 2) {
+- hdlr->startElement = NULL;
+- hdlr->endElement = NULL;
+ hdlr->startElementNs = xmlSAX2StartElementNs;
+ hdlr->endElementNs = xmlSAX2EndElementNs;
+ hdlr->serror = NULL;
+ hdlr->initialized = XML_SAX2_MAGIC;
+ #ifdef LIBXML_SAX1_ENABLED
+ } else if (version == 1) {
+- hdlr->startElement = xmlSAX2StartElement;
+- hdlr->endElement = xmlSAX2EndElement;
+ hdlr->initialized = 1;
+ #endif /* LIBXML_SAX1_ENABLED */
+ } else
+ return(-1);
++#ifdef LIBXML_SAX1_ENABLED
++ hdlr->startElement = xmlSAX2StartElement;
++ hdlr->endElement = xmlSAX2EndElement;
++#else
++ hdlr->startElement = NULL;
++ hdlr->endElement = NULL;
++#endif /* LIBXML_SAX1_ENABLED */
+ hdlr->internalSubset = xmlSAX2InternalSubset;
+ hdlr->externalSubset = xmlSAX2ExternalSubset;
+ hdlr->isStandalone = xmlSAX2IsStandalone;
+diff --git a/parser.c b/parser.c
+index 7814e6e..cf0fb38 100644
+--- a/parser.c
++++ b/parser.c
+@@ -1102,10 +1102,7 @@ xmlDetectSAX2(xmlParserCtxtPtr ctxt) {
+ if (ctxt == NULL) return;
+ sax = ctxt->sax;
+ #ifdef LIBXML_SAX1_ENABLED
+- if ((sax) && (sax->initialized == XML_SAX2_MAGIC) &&
+- ((sax->startElementNs != NULL) ||
+- (sax->endElementNs != NULL) ||
+- ((sax->startElement == NULL) && (sax->endElement == NULL))))
++ if ((sax) && (sax->initialized == XML_SAX2_MAGIC))
+ ctxt->sax2 = 1;
+ #else
+ ctxt->sax2 = 1;
+--
+2.24.4
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-39615-pre.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-pre.patch
new file mode 100644
index 0000000000..b177cdaba0
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-39615-pre.patch
@@ -0,0 +1,44 @@
+From 99fc048d7f7292c5ee18e44c400bd73bc63a47ed Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Fri, 14 Aug 2020 14:18:50 +0200
+Subject: [PATCH] Don't use SAX1 if all element handlers are NULL
+
+Running xmllint with "--sax --noout" installs a SAX2 handler with all
+callbacks set to NULL. In this case or similar situations, we don't want
+to switch to SAX1 parsing.
+
+Note: This patch is needed for the "CVE-2023-39615-0002" patch to apply.
+Without this patch the build will fail with an undefined 'sax' error.
+
+Upstream-Status: Backport from [https://gitlab.gnome.org/GNOME/libxml2/-/commit/99fc048d7f7292c5ee18e44c400bd73bc63a47ed]
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ parser.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/parser.c b/parser.c
+index bb677b0..6e09208 100644
+--- a/parser.c
++++ b/parser.c
+@@ -1098,11 +1098,15 @@ xmlHasFeature(xmlFeature feature)
+ */
+ static void
+ xmlDetectSAX2(xmlParserCtxtPtr ctxt) {
++ xmlSAXHandlerPtr sax;
+ if (ctxt == NULL) return;
++ sax = ctxt->sax;
+ #ifdef LIBXML_SAX1_ENABLED
+- if ((ctxt->sax) && (ctxt->sax->initialized == XML_SAX2_MAGIC) &&
+- ((ctxt->sax->startElementNs != NULL) ||
+- (ctxt->sax->endElementNs != NULL))) ctxt->sax2 = 1;
++ if ((sax) && (sax->initialized == XML_SAX2_MAGIC) &&
++ ((sax->startElementNs != NULL) ||
++ (sax->endElementNs != NULL) ||
++ ((sax->startElement == NULL) && (sax->endElement == NULL))))
++ ctxt->sax2 = 1;
+ #else
+ ctxt->sax2 = 1;
+ #endif /* LIBXML_SAX1_ENABLED */
+--
+2.24.4
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch
new file mode 100644
index 0000000000..182bb29abd
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-1.patch
@@ -0,0 +1,50 @@
+From a22bd982bf10291deea8ba0c61bf75b898c604ce Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 2 Nov 2022 15:44:42 +0100
+Subject: [PATCH] malloc-fail: Fix memory leak in xmlStaticCopyNodeList
+
+Found with libFuzzer, see #344.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/a22bd982bf10291deea8ba0c61bf75b898c604ce]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tree.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/tree.c b/tree.c
+index 507869efe..647288ce3 100644
+--- a/tree.c
++++ b/tree.c
+@@ -4461,7 +4461,7 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ }
+ if (doc->intSubset == NULL) {
+ q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
+- if (q == NULL) return(NULL);
++ if (q == NULL) goto error;
+ q->doc = doc;
+ q->parent = parent;
+ doc->intSubset = (xmlDtdPtr) q;
+@@ -4473,7 +4473,7 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ } else
+ #endif /* LIBXML_TREE_ENABLED */
+ q = xmlStaticCopyNode(node, doc, parent, 1);
+- if (q == NULL) return(NULL);
++ if (q == NULL) goto error;
+ if (ret == NULL) {
+ q->prev = NULL;
+ ret = p = q;
+@@ -4486,6 +4486,9 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ node = node->next;
+ }
+ return(ret);
++error:
++ xmlFreeNodeList(ret);
++ return(NULL);
+ }
+
+ /**
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch
new file mode 100644
index 0000000000..c7e9681e6a
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2023-45322-2.patch
@@ -0,0 +1,80 @@
+From d39f78069dff496ec865c73aa44d7110e429bce9 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Wed, 23 Aug 2023 20:24:24 +0200
+Subject: [PATCH] tree: Fix copying of DTDs
+
+- Don't create multiple DTD nodes.
+- Fix UAF if malloc fails.
+- Skip DTD nodes if tree module is disabled.
+
+Fixes #583.
+
+CVE: CVE-2023-45322
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/d39f78069dff496ec865c73aa44d7110e429bce9]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tree.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/tree.c b/tree.c
+index 6c8a875b9..02c1b5791 100644
+--- a/tree.c
++++ b/tree.c
+@@ -4471,29 +4471,28 @@ xmlNodePtr
+ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ xmlNodePtr ret = NULL;
+ xmlNodePtr p = NULL,q;
++ xmlDtdPtr newSubset = NULL;
+
+ while (node != NULL) {
+-#ifdef LIBXML_TREE_ENABLED
+ if (node->type == XML_DTD_NODE ) {
+- if (doc == NULL) {
++#ifdef LIBXML_TREE_ENABLED
++ if ((doc == NULL) || (doc->intSubset != NULL)) {
+ node = node->next;
+ continue;
+ }
+- if (doc->intSubset == NULL) {
+- q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
+- if (q == NULL) goto error;
+- q->doc = doc;
+- q->parent = parent;
+- doc->intSubset = (xmlDtdPtr) q;
+- xmlAddChild(parent, q);
+- } else {
+- q = (xmlNodePtr) doc->intSubset;
+- xmlAddChild(parent, q);
+- }
+- } else
++ q = (xmlNodePtr) xmlCopyDtd( (xmlDtdPtr) node );
++ if (q == NULL) goto error;
++ q->doc = doc;
++ q->parent = parent;
++ newSubset = (xmlDtdPtr) q;
++#else
++ node = node->next;
++ continue;
+ #endif /* LIBXML_TREE_ENABLED */
++ } else {
+ q = xmlStaticCopyNode(node, doc, parent, 1);
+- if (q == NULL) goto error;
++ if (q == NULL) goto error;
++ }
+ if (ret == NULL) {
+ q->prev = NULL;
+ ret = p = q;
+@@ -4505,6 +4504,8 @@ xmlStaticCopyNodeList(xmlNodePtr node, xmlDocPtr doc, xmlNodePtr parent) {
+ }
+ node = node->next;
+ }
++ if (newSubset != NULL)
++ doc->intSubset = newSubset;
+ return(ret);
+ error:
+ xmlFreeNodeList(ret);
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2024-25062-pre1.patch b/meta/recipes-core/libxml/libxml2/CVE-2024-25062-pre1.patch
new file mode 100644
index 0000000000..31183399f8
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2024-25062-pre1.patch
@@ -0,0 +1,38 @@
+From 31c6ce3b63f8a494ad9e31ca65187a73d8ad3508 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Mon, 9 Nov 2020 17:55:44 +0100
+Subject: [PATCH] Avoid call stack overflow with XML reader and recursive
+ XIncludes
+
+Don't process XIncludes in the result of another inclusion to avoid
+infinite recursion resulting in a call stack overflow.
+
+This is something the XInclude engine shouldn't allow but correct
+handling of intra-document includes would require major changes.
+
+Found by OSS-Fuzz.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/31c6ce3b63f8a494ad9e31ca65187a73d8ad3508]
+CVE: CVE-2024-25062 #Dependency Patch
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ xmlreader.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/xmlreader.c b/xmlreader.c
+index 01adf74f4..72e40b032 100644
+--- a/xmlreader.c
++++ b/xmlreader.c
+@@ -1585,7 +1585,8 @@ node_found:
+ /*
+ * Handle XInclude if asked for
+ */
+- if ((reader->xinclude) && (reader->node != NULL) &&
++ if ((reader->xinclude) && (reader->in_xinclude == 0) &&
++ (reader->node != NULL) &&
+ (reader->node->type == XML_ELEMENT_NODE) &&
+ (reader->node->ns != NULL) &&
+ ((xmlStrEqual(reader->node->ns->href, XINCLUDE_NS)) ||
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch b/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch
new file mode 100644
index 0000000000..5365d5546a
--- /dev/null
+++ b/meta/recipes-core/libxml/libxml2/CVE-2024-25062.patch
@@ -0,0 +1,33 @@
+From 2b0aac140d739905c7848a42efc60bfe783a39b7 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 14 Oct 2023 22:45:54 +0200
+Subject: [PATCH] [CVE-2024-25062] xmlreader: Don't expand XIncludes when
+ backtracking
+
+Fixes a use-after-free if the XML Reader is used with DTD validation and
+XInclude expansion.
+
+Fixes #604.
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/2b0aac140d739905c7848a42efc60bfe783a39b7]
+CVE: CVE-2024-25062
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ xmlreader.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xmlreader.c b/xmlreader.c
+index 979385a13..fefd68e0b 100644
+--- a/xmlreader.c
++++ b/xmlreader.c
+@@ -1443,6 +1443,7 @@ node_found:
+ * Handle XInclude if asked for
+ */
+ if ((reader->xinclude) && (reader->in_xinclude == 0) &&
++ (reader->state != XML_TEXTREADER_BACKTRACK) &&
+ (reader->node != NULL) &&
+ (reader->node->type == XML_ELEMENT_NODE) &&
+ (reader->node->ns != NULL) &&
+--
+GitLab
+
diff --git a/meta/recipes-core/libxml/libxml2_2.9.10.bb b/meta/recipes-core/libxml/libxml2_2.9.10.bb
index c4bb8f29e0..72f830b6d3 100644
--- a/meta/recipes-core/libxml/libxml2_2.9.10.bb
+++ b/meta/recipes-core/libxml/libxml2_2.9.10.bb
@@ -30,6 +30,22 @@ SRC_URI += "http://www.w3.org/XML/Test/xmlts20080827.tar.gz;subdir=${BP};name=te
file://CVE-2021-3541.patch \
file://CVE-2022-23308.patch \
file://CVE-2022-23308-fix-regression.patch \
+ file://CVE-2022-29824-dependent.patch \
+ file://CVE-2022-29824.patch \
+ file://0001-Port-gentest.py-to-Python-3.patch \
+ file://CVE-2016-3709.patch \
+ file://CVE-2022-40303.patch \
+ file://CVE-2022-40304.patch \
+ file://CVE-2023-28484.patch \
+ file://CVE-2023-29469.patch \
+ file://CVE-2023-39615-pre.patch \
+ file://CVE-2023-39615-0001.patch \
+ file://CVE-2023-39615-0002.patch \
+ file://CVE-2021-3516.patch \
+ file://CVE-2023-45322-1.patch \
+ file://CVE-2023-45322-2.patch \
+ file://CVE-2024-25062-pre1.patch \
+ file://CVE-2024-25062.patch \
"
SRC_URI[archive.sha256sum] = "593b7b751dd18c2d6abcd0c4bcb29efc203d0b4373a6df98e3a455ea74ae2813"
@@ -87,6 +103,16 @@ do_configure_prepend () {
}
do_compile_ptest() {
+ # Make sure that testapi.c is newer than gentest.py, because
+ # with reproducible builds both get e.g. a Jan 1 1970 modification
+ # time from SOURCE_DATE_EPOCH, and check-am might then try to run
+ # the rebuild_testapi target. That will fail even with
+ # 0001-Port-gentest.py-to-Python-3.patch, because it needs the
+ # libxml2 Python module (a libxml2-native dependency and a correctly
+ # set PYTHON_SITE_PACKAGES), so it is easier to just rely on the
+ # pre-generated testapi.c from the release.
+ touch ${S}/testapi.c
+
oe_runmake check-am
}
diff --git a/meta/recipes-core/meta/buildtools-tarball.bb b/meta/recipes-core/meta/buildtools-tarball.bb
index faf7108a86..24f5f28589 100644
--- a/meta/recipes-core/meta/buildtools-tarball.bb
+++ b/meta/recipes-core/meta/buildtools-tarball.bb
@@ -66,7 +66,7 @@ create_sdk_files_append () {
# Generate new (mini) sdk-environment-setup file
script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-${SDK_SYS}}
touch $script
- echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${sbindir_nativesdk}:${SDKPATHNATIVE}${base_bindir_nativesdk}:${SDKPATHNATIVE}${base_sbindir_nativesdk}:$PATH' >> $script
+ echo 'export PATH="${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${sbindir_nativesdk}:${SDKPATHNATIVE}${base_bindir_nativesdk}:${SDKPATHNATIVE}${base_sbindir_nativesdk}:$PATH"' >> $script
echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
echo 'export GIT_SSL_CAINFO="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
echo 'export SSL_CERT_FILE="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script
diff --git a/meta/recipes-core/meta/cve-update-db-native.bb b/meta/recipes-core/meta/cve-update-db-native.bb
index 594bf947c8..efc32470d3 100644
--- a/meta/recipes-core/meta/cve-update-db-native.bb
+++ b/meta/recipes-core/meta/cve-update-db-native.bb
@@ -14,8 +14,15 @@ deltask do_populate_sysroot
# CVE database update interval, in seconds. By default: once a day (24*60*60).
# Use 0 to force the update
+# Use a negative value to skip the update
CVE_DB_UPDATE_INTERVAL ?= "86400"
+# Timeout for blocking socket operations, such as the connection attempt.
+CVE_SOCKET_TIMEOUT ?= "60"
+NVDCVE_URL ?= "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-"
+
+CVE_DB_TEMP_FILE ?= "${CVE_CHECK_DB_DIR}/temp_nvdcve_1.1.db"
+
python () {
if not bb.data.inherits_class("cve-check", d):
raise bb.parse.SkipRecipe("Skip recipe when cve-check class is not loaded.")
@@ -27,32 +34,24 @@ python do_fetch() {
"""
import bb.utils
import bb.progress
- import sqlite3, urllib, urllib.parse, shutil, gzip
- from datetime import date
+ import shutil
bb.utils.export_proxies(d)
- BASE_URL = "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-"
- YEAR_START = 2002
-
db_file = d.getVar("CVE_CHECK_DB_FILE")
db_dir = os.path.dirname(db_file)
+ db_tmp_file = d.getVar("CVE_DB_TEMP_FILE")
- if os.path.exists("{0}-journal".format(db_file)):
- # If a journal is present the last update might have been interrupted. In that case,
- # just wipe any leftovers and force the DB to be recreated.
- os.remove("{0}-journal".format(db_file))
-
- if os.path.exists(db_file):
- os.remove(db_file)
+ cleanup_db_download(db_file, db_tmp_file)
# The NVD database changes once a day, so no need to update more frequently
# Allow the user to force-update
try:
import time
update_interval = int(d.getVar("CVE_DB_UPDATE_INTERVAL"))
- if (update_interval < 0):
- update_interval = 0
+ if update_interval < 0:
+ bb.note("CVE database update skipped")
+ return
if time.time() - os.path.getmtime(db_file) < update_interval:
return
@@ -60,28 +59,81 @@ python do_fetch() {
pass
bb.utils.mkdirhier(db_dir)
+ if os.path.exists(db_file):
+ shutil.copy2(db_file, db_tmp_file)
+
+ if update_db_file(db_tmp_file, d) == True:
+ # Update downloaded correctly, can swap files
+ shutil.move(db_tmp_file, db_file)
+ else:
+ # Update failed, do not modify the database
+ bb.note("CVE database update failed")
+ os.remove(db_tmp_file)
+}
- # Connect to database
- conn = sqlite3.connect(db_file)
- c = conn.cursor()
+do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
+do_fetch[file-checksums] = ""
+do_fetch[vardeps] = ""
+
+def cleanup_db_download(db_file, db_tmp_file):
+ """
+ Cleanup the download space from possible failed downloads
+ """
- initialize_db(c)
+ # Clean up the updates done on the main file
+ # Remove it only if a journal file exists - it means a complete re-download
+ if os.path.exists("{0}-journal".format(db_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_file))
+
+ if os.path.exists(db_file):
+ os.remove(db_file)
+
+ # Clean-up the temporary file downloads, we can remove both journal
+ # and the temporary database
+ if os.path.exists("{0}-journal".format(db_tmp_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_tmp_file))
+
+ if os.path.exists(db_tmp_file):
+ os.remove(db_tmp_file)
+
+def update_db_file(db_tmp_file, d):
+ """
+ Update the given database file
+ """
+ import bb.utils, bb.progress
+ from datetime import date
+ import urllib, gzip, sqlite3
+
+ YEAR_START = 2002
+ cve_socket_timeout = int(d.getVar("CVE_SOCKET_TIMEOUT"))
+
+ # Connect to database
+ conn = sqlite3.connect(db_tmp_file)
+ initialize_db(conn)
with bb.progress.ProgressHandler(d) as ph, open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a') as cve_f:
total_years = date.today().year + 1 - YEAR_START
for i, year in enumerate(range(YEAR_START, date.today().year + 1)):
+ bb.debug(2, "Updating %d" % year)
ph.update((float(i + 1) / total_years) * 100)
- year_url = BASE_URL + str(year)
+ year_url = (d.getVar('NVDCVE_URL')) + str(year)
meta_url = year_url + ".meta"
json_url = year_url + ".json.gz"
# Retrieve meta last modified date
try:
- response = urllib.request.urlopen(meta_url)
+ response = urllib.request.urlopen(meta_url, timeout=cve_socket_timeout)
except urllib.error.URLError as e:
cve_f.write('Warning: CVE db update error, Unable to fetch CVE data.\n\n')
- bb.warn("Failed to fetch CVE data (%s)" % e.reason)
- return
+ bb.warn("Failed to fetch CVE data (%s)" % e)
+ import socket
+ result = socket.getaddrinfo("nvd.nist.gov", 443, proto=socket.IPPROTO_TCP)
+ bb.warn("Host IPs are %s" % (", ".join(t[4][0] for t in result)))
+ return False
if response:
for l in response.read().decode("utf-8").splitlines():
@@ -91,53 +143,58 @@ python do_fetch() {
break
else:
bb.warn("Cannot parse CVE metadata, update failed")
- return
+ return False
# Compare with current db last modified date
- c.execute("select DATE from META where YEAR = ?", (year,))
- meta = c.fetchone()
+ cursor = conn.execute("select DATE from META where YEAR = ?", (year,))
+ meta = cursor.fetchone()
+ cursor.close()
+
if not meta or meta[0] != last_modified:
+ bb.debug(2, "Updating entries")
# Clear products table entries corresponding to current year
- c.execute("delete from PRODUCTS where ID like ?", ('CVE-%d%%' % year,))
+ conn.execute("delete from PRODUCTS where ID like ?", ('CVE-%d%%' % year,)).close()
# Update db with current year json file
try:
- response = urllib.request.urlopen(json_url)
+ response = urllib.request.urlopen(json_url, timeout=cve_socket_timeout)
if response:
- update_db(c, gzip.decompress(response.read()).decode('utf-8'))
- c.execute("insert or replace into META values (?, ?)", [year, last_modified])
+ update_db(conn, gzip.decompress(response.read()).decode('utf-8'))
+ conn.execute("insert or replace into META values (?, ?)", [year, last_modified]).close()
except urllib.error.URLError as e:
cve_f.write('Warning: CVE db update error, CVE data is outdated.\n\n')
bb.warn("Cannot parse CVE data (%s), update failed" % e.reason)
- return
-
+ return False
+ else:
+ bb.debug(2, "Already up to date (last modified %s)" % last_modified)
# Update success, set the date to cve_check file.
if year == date.today().year:
cve_f.write('CVE database update : %s\n\n' % date.today())
conn.commit()
conn.close()
-}
+ return True
-do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
-do_fetch[file-checksums] = ""
-do_fetch[vardeps] = ""
+def initialize_db(conn):
+ with conn:
+ c = conn.cursor()
+
+ c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
-def initialize_db(c):
- c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
+ c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
+ SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
- c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
- SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
+ c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
+ VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
+ VERSION_END TEXT, OPERATOR_END TEXT)")
+ c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
- c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
- VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
- VERSION_END TEXT, OPERATOR_END TEXT)")
- c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
+ c.close()
-def parse_node_and_insert(c, node, cveId):
+def parse_node_and_insert(conn, node, cveId):
# Parse children node if needed
for child in node.get('children', ()):
- parse_node_and_insert(c, child, cveId)
+ parse_node_and_insert(conn, child, cveId)
def cpe_generator():
for cpe in node.get('cpe_match', ()):
@@ -194,9 +251,9 @@ def parse_node_and_insert(c, node, cveId):
# Save processing by representing as -.
yield [cveId, vendor, product, '-', '', '', '']
- c.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator())
+ conn.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator()).close()
-def update_db(c, jsondata):
+def update_db(conn, jsondata):
import json
root = json.loads(jsondata)
@@ -220,12 +277,12 @@ def update_db(c, jsondata):
accessVector = accessVector or "UNKNOWN"
cvssv3 = 0.0
- c.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
- [cveId, cveDesc, cvssv2, cvssv3, date, accessVector])
+ conn.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
+ [cveId, cveDesc, cvssv2, cvssv3, date, accessVector]).close()
configurations = elt['configurations']['nodes']
for config in configurations:
- parse_node_and_insert(c, config, cveId)
+ parse_node_and_insert(conn, config, cveId)
do_fetch[nostamp] = "1"
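
The reworked do_fetch above reduces to a copy-update-swap pattern: the existing database is copied to CVE_DB_TEMP_FILE, the update runs against the copy, and only a successful update is moved back over CVE_CHECK_DB_FILE, so a failed or interrupted download can no longer corrupt the database that cve-check reads. Stripped of BitBake specifics, the shape is as follows (the names here are illustrative, not part of the recipe):

    import os
    import shutil

    def safe_update(db_file, db_tmp_file, do_update):
        """Update db_file via a temporary copy; keep the old file on failure."""
        if os.path.exists(db_file):
            shutil.copy2(db_file, db_tmp_file)   # start from the current data
        if do_update(db_tmp_file):
            shutil.move(db_tmp_file, db_file)    # swap in only on success
        elif os.path.exists(db_tmp_file):
            os.remove(db_tmp_file)               # failed update: discard the copy
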
diff --git a/meta/recipes-core/meta/cve-update-nvd2-native.bb b/meta/recipes-core/meta/cve-update-nvd2-native.bb
new file mode 100644
index 0000000000..1a3eeba6d0
--- /dev/null
+++ b/meta/recipes-core/meta/cve-update-nvd2-native.bb
@@ -0,0 +1,372 @@
+SUMMARY = "Updates the NVD CVE database"
+LICENSE = "MIT"
+
+# Important note:
+# This product uses the NVD API but is not endorsed or certified by the NVD.
+
+INHIBIT_DEFAULT_DEPS = "1"
+
+inherit native
+
+deltask do_unpack
+deltask do_patch
+deltask do_configure
+deltask do_compile
+deltask do_install
+deltask do_populate_sysroot
+
+NVDCVE_URL ?= "https://services.nvd.nist.gov/rest/json/cves/2.0"
+
+# If you have an NVD API key (https://nvd.nist.gov/developers/request-an-api-key)
+# then set this variable to get higher rate limits.
+NVDCVE_API_KEY ?= ""
+
+# CVE database update interval, in seconds. By default: once a day (24*60*60).
+# Use 0 to force the update
+# Use a negative value to skip the update
+CVE_DB_UPDATE_INTERVAL ?= "86400"
+
+# CVE database incremental update age threshold, in seconds. If the database is
+# older than this threshold, do a full re-download; otherwise, do an incremental
+# update. By default: the maximum range allowed by the NVD API, 120 days (120*24*60*60).
+# Use 0 to force a full download.
+CVE_DB_INCR_UPDATE_AGE_THRES ?= "10368000"
+
+# Number of attempts for each http query to nvd server before giving up
+CVE_DB_UPDATE_ATTEMPTS ?= "5"
+
+CVE_DB_TEMP_FILE ?= "${CVE_CHECK_DB_DIR}/temp_nvdcve_2.db"
+
+python () {
+ if not bb.data.inherits_class("cve-check", d):
+ raise bb.parse.SkipRecipe("Skip recipe when cve-check class is not loaded.")
+}
+
+python do_fetch() {
+ """
+ Update NVD database with API 2.0
+ """
+ import bb.utils
+ import bb.progress
+ import shutil
+
+ bb.utils.export_proxies(d)
+
+ db_file = d.getVar("CVE_CHECK_DB_FILE")
+ db_dir = os.path.dirname(db_file)
+ db_tmp_file = d.getVar("CVE_DB_TEMP_FILE")
+
+ cleanup_db_download(db_file, db_tmp_file)
+ # By default let's update the whole database (since time 0)
+ database_time = 0
+
+ # The NVD database changes once a day, so no need to update more frequently
+ # Allow the user to force-update
+ try:
+ import time
+ update_interval = int(d.getVar("CVE_DB_UPDATE_INTERVAL"))
+ if update_interval < 0:
+ bb.note("CVE database update skipped")
+ return
+ if time.time() - os.path.getmtime(db_file) < update_interval:
+ bb.note("CVE database recently updated, skipping")
+ return
+ database_time = os.path.getmtime(db_file)
+
+ except OSError:
+ pass
+
+ bb.utils.mkdirhier(db_dir)
+ if os.path.exists(db_file):
+ shutil.copy2(db_file, db_tmp_file)
+
+ if update_db_file(db_tmp_file, d, database_time) == True:
+ # Update downloaded correctly, can swap files
+ shutil.move(db_tmp_file, db_file)
+ else:
+ # Update failed, do not modify the database
+ bb.warn("CVE database update failed")
+ os.remove(db_tmp_file)
+}
+
+do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}"
+do_fetch[file-checksums] = ""
+do_fetch[vardeps] = ""
+
+def cleanup_db_download(db_file, db_tmp_file):
+ """
+ Cleanup the download space from possible failed downloads
+ """
+
+ # Clean up the updates done on the main file
+ # Remove it only if a journal file exists - it means a complete re-download
+ if os.path.exists("{0}-journal".format(db_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_file))
+
+ if os.path.exists(db_file):
+ os.remove(db_file)
+
+ # Clean-up the temporary file downloads, we can remove both journal
+ # and the temporary database
+ if os.path.exists("{0}-journal".format(db_tmp_file)):
+ # If a journal is present the last update might have been interrupted. In that case,
+ # just wipe any leftovers and force the DB to be recreated.
+ os.remove("{0}-journal".format(db_tmp_file))
+
+ if os.path.exists(db_tmp_file):
+ os.remove(db_tmp_file)
+
+def nvd_request_wait(attempt, min_wait):
+    return min((2 * attempt) + min_wait, 30)
+
+def nvd_request_next(url, attempts, api_key, args, min_wait):
+ """
+ Request next part of the NVD database
+ NVD API documentation: https://nvd.nist.gov/developers/vulnerabilities
+ """
+
+ import urllib.request
+ import urllib.parse
+ import gzip
+ import http
+ import time
+
+ request = urllib.request.Request(url + "?" + urllib.parse.urlencode(args))
+ if api_key:
+ request.add_header("apiKey", api_key)
+ bb.note("Requesting %s" % request.full_url)
+
+ for attempt in range(attempts):
+ try:
+ r = urllib.request.urlopen(request)
+
+ if (r.headers['content-encoding'] == 'gzip'):
+ buf = r.read()
+ raw_data = gzip.decompress(buf).decode("utf-8")
+ else:
+ raw_data = r.read().decode("utf-8")
+
+ r.close()
+
+ except Exception as e:
+ wait_time = nvd_request_wait(attempt, min_wait)
+ bb.note("CVE database: received error (%s)" % (e))
+ bb.note("CVE database: retrying download after %d seconds. attempted (%d/%d)" % (wait_time, attempt+1, attempts))
+ time.sleep(wait_time)
+ pass
+ else:
+ return raw_data
+ else:
+ # We failed at all attempts
+ return None
+
+def update_db_file(db_tmp_file, d, database_time):
+ """
+ Update the given database file
+ """
+ import bb.utils, bb.progress
+ import datetime
+ import sqlite3
+ import json
+
+ # Connect to database
+ conn = sqlite3.connect(db_tmp_file)
+ initialize_db(conn)
+
+ req_args = {'startIndex' : 0}
+
+ incr_update_threshold = int(d.getVar("CVE_DB_INCR_UPDATE_AGE_THRES"))
+ if database_time != 0:
+ database_date = datetime.datetime.fromtimestamp(database_time, tz=datetime.timezone.utc)
+ today_date = datetime.datetime.now(tz=datetime.timezone.utc)
+ delta = today_date - database_date
+ if incr_update_threshold == 0:
+ bb.note("CVE database: forced full update")
+ elif delta < datetime.timedelta(seconds=incr_update_threshold):
+ bb.note("CVE database: performing partial update")
+ # The maximum range for time is 120 days
+ if delta > datetime.timedelta(days=120):
+            bb.error("CVE database: Trying to do an incremental update on a range larger than supported")
+ req_args['lastModStartDate'] = database_date.isoformat()
+ req_args['lastModEndDate'] = today_date.isoformat()
+ else:
+ bb.note("CVE database: file too old, forcing a full update")
+ else:
+ bb.note("CVE database: no preexisting database, do a full download")
+
+ with bb.progress.ProgressHandler(d) as ph, open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a') as cve_f:
+
+ bb.note("Updating entries")
+ index = 0
+ url = d.getVar("NVDCVE_URL")
+ api_key = d.getVar("NVDCVE_API_KEY") or None
+ attempts = int(d.getVar("CVE_DB_UPDATE_ATTEMPTS"))
+
+ # Recommended by NVD
+ wait_time = 6
+ if api_key:
+ wait_time = 2
+
+ while True:
+ req_args['startIndex'] = index
+ raw_data = nvd_request_next(url, attempts, api_key, req_args, wait_time)
+ if raw_data is None:
+ # We haven't managed to download data
+ return False
+
+ data = json.loads(raw_data)
+
+ index = data["startIndex"]
+ total = data["totalResults"]
+ per_page = data["resultsPerPage"]
+ bb.note("Got %d entries" % per_page)
+ for cve in data["vulnerabilities"]:
+ update_db(conn, cve)
+
+ index += per_page
+ ph.update((float(index) / (total+1)) * 100)
+ if index >= total:
+ break
+
+ # Recommended by NVD
+ time.sleep(wait_time)
+
+ # Update success, set the date to cve_check file.
+ cve_f.write('CVE database update : %s\n\n' % datetime.date.today())
+
+ conn.commit()
+ conn.close()
+ return True
+
+def initialize_db(conn):
+ with conn:
+ c = conn.cursor()
+
+ c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)")
+
+ c.execute("CREATE TABLE IF NOT EXISTS NVD (ID TEXT UNIQUE, SUMMARY TEXT, \
+ SCOREV2 TEXT, SCOREV3 TEXT, MODIFIED INTEGER, VECTOR TEXT)")
+
+ c.execute("CREATE TABLE IF NOT EXISTS PRODUCTS (ID TEXT, \
+ VENDOR TEXT, PRODUCT TEXT, VERSION_START TEXT, OPERATOR_START TEXT, \
+ VERSION_END TEXT, OPERATOR_END TEXT)")
+ c.execute("CREATE INDEX IF NOT EXISTS PRODUCT_ID_IDX on PRODUCTS(ID);")
+
+ c.close()
+
+def parse_node_and_insert(conn, node, cveId):
+
+ def cpe_generator():
+ for cpe in node.get('cpeMatch', ()):
+ if not cpe['vulnerable']:
+ return
+ cpe23 = cpe.get('criteria')
+ if not cpe23:
+ return
+ cpe23 = cpe23.split(':')
+ if len(cpe23) < 6:
+ return
+ vendor = cpe23[3]
+ product = cpe23[4]
+ version = cpe23[5]
+
+ if cpe23[6] == '*' or cpe23[6] == '-':
+ version_suffix = ""
+ else:
+ version_suffix = "_" + cpe23[6]
+
+ if version != '*' and version != '-':
+ # Version is defined, this is a '=' match
+ yield [cveId, vendor, product, version + version_suffix, '=', '', '']
+ elif version == '-':
+ # no version information is available
+ yield [cveId, vendor, product, version, '', '', '']
+ else:
+ # Parse start version, end version and operators
+ op_start = ''
+ op_end = ''
+ v_start = ''
+ v_end = ''
+
+ if 'versionStartIncluding' in cpe:
+ op_start = '>='
+ v_start = cpe['versionStartIncluding']
+
+ if 'versionStartExcluding' in cpe:
+ op_start = '>'
+ v_start = cpe['versionStartExcluding']
+
+ if 'versionEndIncluding' in cpe:
+ op_end = '<='
+ v_end = cpe['versionEndIncluding']
+
+ if 'versionEndExcluding' in cpe:
+ op_end = '<'
+ v_end = cpe['versionEndExcluding']
+
+ if op_start or op_end or v_start or v_end:
+ yield [cveId, vendor, product, v_start, op_start, v_end, op_end]
+ else:
+ # This is no version information, expressed differently.
+ # Save processing by representing as -.
+ yield [cveId, vendor, product, '-', '', '', '']
+
+ conn.executemany("insert into PRODUCTS values (?, ?, ?, ?, ?, ?, ?)", cpe_generator()).close()
+
+def update_db(conn, elt):
+ """
+ Update a single entry in the on-disk database
+ """
+
+ accessVector = None
+ cveId = elt['cve']['id']
+ if elt['cve']['vulnStatus'] == "Rejected":
+ c = conn.cursor()
+ c.execute("delete from PRODUCTS where ID = ?;", [cveId])
+ c.execute("delete from NVD where ID = ?;", [cveId])
+ c.close()
+ return
+ cveDesc = ""
+ for desc in elt['cve']['descriptions']:
+ if desc['lang'] == 'en':
+ cveDesc = desc['value']
+ date = elt['cve']['lastModified']
+ try:
+ accessVector = elt['cve']['metrics']['cvssMetricV2'][0]['cvssData']['accessVector']
+ cvssv2 = elt['cve']['metrics']['cvssMetricV2'][0]['cvssData']['baseScore']
+ except KeyError:
+ cvssv2 = 0.0
+ cvssv3 = None
+ try:
+ accessVector = accessVector or elt['cve']['metrics']['cvssMetricV30'][0]['cvssData']['attackVector']
+ cvssv3 = elt['cve']['metrics']['cvssMetricV30'][0]['cvssData']['baseScore']
+ except KeyError:
+ pass
+ try:
+ accessVector = accessVector or elt['cve']['metrics']['cvssMetricV31'][0]['cvssData']['attackVector']
+ cvssv3 = cvssv3 or elt['cve']['metrics']['cvssMetricV31'][0]['cvssData']['baseScore']
+ except KeyError:
+ pass
+ accessVector = accessVector or "UNKNOWN"
+ cvssv3 = cvssv3 or 0.0
+
+ conn.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)",
+ [cveId, cveDesc, cvssv2, cvssv3, date, accessVector]).close()
+
+ try:
+ # Remove any pre-existing CVE configuration. Even for partial database
+ # update, those will be repopulated. This ensures that old
+ # configuration is not kept for an updated CVE.
+ conn.execute("delete from PRODUCTS where ID = ?", [cveId]).close()
+ for config in elt['cve']['configurations']:
+ # This is suboptimal as it doesn't handle AND/OR and negate, but is better than nothing
+ for node in config["nodes"]:
+ parse_node_and_insert(conn, node, cveId)
+ except KeyError:
+ bb.note("CVE %s has no configurations" % cveId)
+
+do_fetch[nostamp] = "1"
+
+EXCLUDE_FROM_WORLD = "1"
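
The download loop in update_db_file above walks the NVD 2.0 API the way the service documents it: request pages starting at startIndex, advance by resultsPerPage, and stop once totalResults entries have been seen, sleeping between requests. Outside BitBake the same walk looks roughly like this (retries, the apiKey header and gzip handling from the recipe are omitted; the endpoint is the recipe's NVDCVE_URL):

    import json
    import time
    import urllib.parse
    import urllib.request

    URL = "https://services.nvd.nist.gov/rest/json/cves/2.0"

    def iter_cves(extra_args=None, wait=6):
        """Yield CVE entries page by page, as update_db_file() consumes them."""
        index = 0
        while True:
            args = dict(extra_args or {}, startIndex=index)
            with urllib.request.urlopen(URL + "?" + urllib.parse.urlencode(args)) as r:
                data = json.loads(r.read().decode("utf-8"))
            yield from data["vulnerabilities"]
            index = data["startIndex"] + data["resultsPerPage"]
            if index >= data["totalResults"]:
                break
            time.sleep(wait)   # delay recommended by NVD between requests
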
diff --git a/meta/recipes-core/ncurses/files/CVE-2022-29458.patch b/meta/recipes-core/ncurses/files/CVE-2022-29458.patch
new file mode 100644
index 0000000000..eb1b7c96f9
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2022-29458.patch
@@ -0,0 +1,135 @@
+From 5f40697e37e195069f55528fc7a1d77e619ad104 Mon Sep 17 00:00:00 2001
+From: Dan Tran <dantran@microsoft.com>
+Date: Fri, 13 May 2022 13:28:41 -0700
+Subject: [PATCH] ncurses 6.3 before patch 20220416 has an out-of-bounds read
+ and segmentation violation in convert_strings in tinfo/read_entry.c in the
+ terminfo library.
+
+CVE: CVE-2022-29458
+Upstream-Status: Backport
+[https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1009870]
+
+Signed-off-by: Gustavo Lima Chaves <gustavo.chaves@microsoft.com>
+Signed-off-by: Dan Tran <dantran@microsoft.com>
+---
+ ncurses/tinfo/alloc_entry.c | 14 ++++++--------
+ ncurses/tinfo/read_entry.c | 25 +++++++++++++++++++------
+ 2 files changed, 25 insertions(+), 14 deletions(-)
+
+diff --git a/ncurses/tinfo/alloc_entry.c b/ncurses/tinfo/alloc_entry.c
+index 4bf7d6c8..b49ad6aa 100644
+--- a/ncurses/tinfo/alloc_entry.c
++++ b/ncurses/tinfo/alloc_entry.c
+@@ -48,13 +48,11 @@
+
+ #include <tic.h>
+
+-MODULE_ID("$Id: alloc_entry.c,v 1.64 2020/02/02 23:34:34 tom Exp $")
++MODULE_ID("$Id: alloc_entry.c,v 1.69 2022/04/16 22:46:53 tom Exp $")
+
+ #define ABSENT_OFFSET -1
+ #define CANCELLED_OFFSET -2
+
+-#define MAX_STRTAB 4096 /* documented maximum entry size */
+-
+ static char *stringbuf; /* buffer for string capabilities */
+ static size_t next_free; /* next free character in stringbuf */
+
+@@ -71,8 +69,8 @@ _nc_init_entry(ENTRY * const tp)
+ }
+ #endif
+
+- if (stringbuf == 0)
+- TYPE_MALLOC(char, (size_t) MAX_STRTAB, stringbuf);
++ if (stringbuf == NULL)
++ TYPE_MALLOC(char, (size_t) MAX_ENTRY_SIZE, stringbuf);
+
+ next_free = 0;
+
+@@ -108,11 +106,11 @@ _nc_save_str(const char *const string)
+ * Cheat a little by making an empty string point to the end of the
+ * previous string.
+ */
+- if (next_free < MAX_STRTAB) {
++ if (next_free < MAX_ENTRY_SIZE) {
+ result = (stringbuf + next_free - 1);
+ }
+- } else if (next_free + len < MAX_STRTAB) {
+- _nc_STRCPY(&stringbuf[next_free], string, MAX_STRTAB);
++ } else if (next_free + len < MAX_ENTRY_SIZE) {
++ _nc_STRCPY(&stringbuf[next_free], string, MAX_ENTRY_SIZE);
+ DEBUG(7, ("Saved string %s", _nc_visbuf(string)));
+ DEBUG(7, ("at location %d", (int) next_free));
+ next_free += len;
+diff --git a/ncurses/tinfo/read_entry.c b/ncurses/tinfo/read_entry.c
+index 5b570b0f..23c2cebc 100644
+--- a/ncurses/tinfo/read_entry.c
++++ b/ncurses/tinfo/read_entry.c
+@@ -1,5 +1,5 @@
+ /****************************************************************************
+- * Copyright 2018-2019,2020 Thomas E. Dickey *
++ * Copyright 2018-2021,2022 Thomas E. Dickey *
+ * Copyright 1998-2016,2017 Free Software Foundation, Inc. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining a *
+@@ -42,7 +42,7 @@
+
+ #include <tic.h>
+
+-MODULE_ID("$Id: read_entry.c,v 1.157 2020/02/02 23:34:34 tom Exp $")
++MODULE_ID("$Id: read_entry.c,v 1.162 2022/04/16 21:00:00 tom Exp $")
+
+ #define TYPE_CALLOC(type,elts) typeCalloc(type, (unsigned)(elts))
+
+@@ -145,6 +145,7 @@ convert_strings(char *buf, char **Strings, int count, int size, char *table)
+ {
+ int i;
+ char *p;
++ bool corrupt = FALSE;
+
+ for (i = 0; i < count; i++) {
+ if (IS_NEG1(buf + 2 * i)) {
+@@ -154,8 +155,20 @@ convert_strings(char *buf, char **Strings, int count, int size, char *table)
+ } else if (MyNumber(buf + 2 * i) > size) {
+ Strings[i] = ABSENT_STRING;
+ } else {
+- Strings[i] = (MyNumber(buf + 2 * i) + table);
+- TR(TRACE_DATABASE, ("Strings[%d] = %s", i, _nc_visbuf(Strings[i])));
++ int nn = MyNumber(buf + 2 * i);
++ if (nn >= 0 && nn < size) {
++ Strings[i] = (nn + table);
++ TR(TRACE_DATABASE, ("Strings[%d] = %s", i,
++ _nc_visbuf(Strings[i])));
++ } else {
++ if (!corrupt) {
++ corrupt = TRUE;
++ TR(TRACE_DATABASE,
++ ("ignore out-of-range index %d to Strings[]", nn));
++ _nc_warning("corrupt data found in convert_strings");
++ }
++ Strings[i] = ABSENT_STRING;
++ }
+ }
+
+ /* make sure all strings are NUL terminated */
+@@ -776,7 +789,7 @@ _nc_read_tic_entry(char *filename,
+ * looking for compiled (binary) terminfo data.
+ *
+ * cgetent uses a two-level lookup. On the first it uses the given
+- * name to return a record containing only the aliases for an entry.
++ * name to return a record containing only the aliases for an entry.
+ * On the second (using that list of aliases as a key), it returns the
+ * content of the terminal description. We expect second lookup to
+ * return data beginning with the same set of aliases.
+@@ -833,7 +846,7 @@ _nc_read_tic_entry(char *filename,
+ #endif /* NCURSES_USE_DATABASE */
+
+ /*
+- * Find and read the compiled entry for a given terminal type, if it exists.
++ * Find and read the compiled entry for a given terminal type, if it exists.
+ * We take pains here to make sure no combination of environment variables and
+ * terminal type name can be used to overrun the file buffer.
+ */
+--
+2.36.1
+
diff --git a/meta/recipes-core/ncurses/files/CVE-2023-29491.patch b/meta/recipes-core/ncurses/files/CVE-2023-29491.patch
new file mode 100644
index 0000000000..0a0497723f
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2023-29491.patch
@@ -0,0 +1,45 @@
+Backport of:
+
+Author: Sven Joachim <svenjoac@gmx.de>
+Description: Change the --disable-root-environ configure option behavior
+ By default, the --disable-root-environ option forbids program run by
+ the superuser to load custom terminfo entries. This patch changes
+ that to only restrict programs running with elevated privileges,
+ matching the behavior of the --disable-setuid-environ option
+ introduced in the 20230423 upstream patchlevel.
+Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1034372#29
+Bug: https://lists.gnu.org/archive/html/bug-ncurses/2023-04/msg00018.html
+Forwarded: not-needed
+Last-Update: 2023-05-01
+
+Upstream-Status: Backport [https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/ncurses/6.2-0ubuntu2.1/ncurses_6.2-0ubuntu2.1.debian.tar.xz]
+CVE: CVE-2023-29491
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+
+---
+ ncurses/tinfo/access.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/ncurses/tinfo/access.c
++++ b/ncurses/tinfo/access.c
+@@ -178,15 +178,16 @@ _nc_is_file_path(const char *path)
+ NCURSES_EXPORT(int)
+ _nc_env_access(void)
+ {
++ int result = TRUE;
++
+ #if HAVE_ISSETUGID
+ if (issetugid())
+- return FALSE;
++ result = FALSE;
+ #elif HAVE_GETEUID && HAVE_GETEGID
+ if (getuid() != geteuid()
+ || getgid() != getegid())
+- return FALSE;
++ result = FALSE;
+ #endif
+- /* ...finally, disallow root */
+- return (getuid() != ROOT_UID) && (geteuid() != ROOT_UID);
++ return result;
+ }
+ #endif
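As an aside, a rough Python analogue of the relaxed check introduced above (an approximation, not the ncurses code): environment-supplied terminfo paths are refused only when the process runs with elevated privileges, no longer whenever it simply runs as root:

    import os

    def env_access_allowed():
        # Mirrors the geteuid()/getegid() branch above; issetugid() has no
        # direct Python counterpart, so only the UID/GID comparison is sketched.
        return os.getuid() == os.geteuid() and os.getgid() == os.getegid()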
diff --git a/meta/recipes-core/ncurses/files/CVE-2023-50495.patch b/meta/recipes-core/ncurses/files/CVE-2023-50495.patch
new file mode 100644
index 0000000000..58c23866d1
--- /dev/null
+++ b/meta/recipes-core/ncurses/files/CVE-2023-50495.patch
@@ -0,0 +1,79 @@
+Fix for CVE-2023-50495 from upstream:
+https://github.com/ThomasDickey/ncurses-snapshots/commit/efe9674ee14b14b788f9618941f97d31742f0adc
+
+Reference:
+https://invisible-island.net/archives/ncurses/6.4/ncurses-6.4-20230424.patch.gz
+
+Upstream-Status: Backport [import from suse ftp.pbone.net/mirror/ftp.opensuse.org/update/leap-micro/5.3/sle/src/ncurses-6.1-150000.5.20.1.src.rpm
+Upstream commit https://github.com/ThomasDickey/ncurses-snapshots/commit/efe9674ee14b14b788f9618941f97d31742f0adc]
+CVE: CVE-2023-50495
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ncurses/tinfo/parse_entry.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/ncurses/tinfo/parse_entry.c b/ncurses/tinfo/parse_entry.c
+index 23574b66..56ba9ae6 100644
+--- a/ncurses/tinfo/parse_entry.c
++++ b/ncurses/tinfo/parse_entry.c
+@@ -110,7 +110,7 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ /* Well, we are given a cancel for a name that we don't recognize */
+ return _nc_extend_names(entryp, name, STRING);
+ default:
+- return 0;
++ return NULL;
+ }
+
+ /* Adjust the 'offset' (insertion-point) to keep the lists of extended
+@@ -142,6 +142,11 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ for (last = (unsigned) (max - 1); last > tindex; last--)
+
+ if (!found) {
++ char *saved;
++
++ if ((saved = _nc_save_str(name)) == NULL)
++ return NULL;
++
+ switch (token_type) {
+ case BOOLEAN:
+ tp->ext_Booleans++;
+@@ -169,7 +174,7 @@ _nc_extend_names(ENTRY * entryp, const char *name, int token_type)
+ TYPE_REALLOC(char *, actual, tp->ext_Names);
+ while (--actual > offset)
+ tp->ext_Names[actual] = tp->ext_Names[actual - 1];
+- tp->ext_Names[offset] = _nc_save_str(name);
++ tp->ext_Names[offset] = saved;
+ }
+
+ temp.nte_name = tp->ext_Names[offset];
+@@ -337,6 +342,8 @@ _nc_parse_entry(ENTRY * entryp, int literal, bool silent)
+ bool is_use = (strcmp(_nc_curr_token.tk_name, "use") == 0);
+ bool is_tc = !is_use && (strcmp(_nc_curr_token.tk_name, "tc") == 0);
+ if (is_use || is_tc) {
++ char *saved;
++
+ if (!VALID_STRING(_nc_curr_token.tk_valstring)
+ || _nc_curr_token.tk_valstring[0] == '\0') {
+ _nc_warning("missing name for use-clause");
+@@ -350,11 +357,13 @@ _nc_parse_entry(ENTRY * entryp, int literal, bool silent)
+ _nc_curr_token.tk_valstring);
+ continue;
+ }
+- entryp->uses[entryp->nuses].name = _nc_save_str(_nc_curr_token.tk_valstring);
+- entryp->uses[entryp->nuses].line = _nc_curr_line;
+- entryp->nuses++;
+- if (entryp->nuses > 1 && is_tc) {
+- BAD_TC_USAGE
++ if ((saved = _nc_save_str(_nc_curr_token.tk_valstring)) != NULL) {
++ entryp->uses[entryp->nuses].name = saved;
++ entryp->uses[entryp->nuses].line = _nc_curr_line;
++ entryp->nuses++;
++ if (entryp->nuses > 1 && is_tc) {
++ BAD_TC_USAGE
++ }
+ }
+ } else {
+ /* normal token lookup */
+--
+2.25.1
+
diff --git a/meta/recipes-core/ncurses/ncurses_6.2.bb b/meta/recipes-core/ncurses/ncurses_6.2.bb
index 700464f70b..dbff149f55 100644
--- a/meta/recipes-core/ncurses/ncurses_6.2.bb
+++ b/meta/recipes-core/ncurses/ncurses_6.2.bb
@@ -4,11 +4,14 @@ SRC_URI += "file://0001-tic-hang.patch \
file://0002-configure-reproducible.patch \
file://0003-gen-pkgconfig.in-Do-not-include-LDFLAGS-in-generated.patch \
file://CVE-2021-39537.patch \
+ file://CVE-2022-29458.patch \
+ file://CVE-2023-29491.patch \
+ file://CVE-2023-50495.patch \
"
# commit id corresponds to the revision in package version
SRCREV = "a669013cd5e9d6434e5301348ea51baf306c93c4"
S = "${WORKDIR}/git"
-EXTRA_OECONF += "--with-abi-version=5"
+EXTRA_OECONF += "--with-abi-version=5 --disable-root-environ"
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+(\.\d+)+(\+\d+)*)"
# This is needed when using patchlevel versions like 6.1+20181013
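A quick sketch (illustrative only) of what the UPSTREAM_CHECK_GITTAGREGEX above is meant to capture, including patchlevel-style versions:

    import re

    regex = re.compile(r"(?P<pver>\d+(\.\d+)+(\+\d+)*)")
    for tag in ("v6.2", "v6.1+20181013"):          # hypothetical upstream tags
        print(regex.search(tag).group("pver"))     # -> 6.2, 6.1+20181013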
diff --git a/meta/recipes-core/ovmf/ovmf/0001-Basetools-genffs-fix-gcc12-warning.patch b/meta/recipes-core/ovmf/ovmf/0001-Basetools-genffs-fix-gcc12-warning.patch
new file mode 100644
index 0000000000..4418d52898
--- /dev/null
+++ b/meta/recipes-core/ovmf/ovmf/0001-Basetools-genffs-fix-gcc12-warning.patch
@@ -0,0 +1,49 @@
+From 7b005f344e533cd913c3ca05b266f9872df886d1 Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Thu, 24 Mar 2022 20:04:34 +0800
+Subject: [PATCH] BaseTools: fix gcc12 warning
+
+GenFfs.c:545:5: error: pointer ‘InFileHandle’ used after ‘fclose’ [-Werror=use-after-free]
+ 545 | Error(NULL, 0, 4001, "Resource", "memory cannot be allocated of %s", InFileHandle);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+GenFfs.c:544:5: note: call to ‘fclose’ here
+ 544 | fclose (InFileHandle);
+ | ^~~~~~~~~~~~~~~~~~~~~
+
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reviewed-by: Bob Feng <bob.c.feng@intel.com>
+
+Upstream-Status: Backport [https://github.com/tianocore/edk2/commit/7b005f344e533cd913c3ca05b266f9872df886d1]
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ BaseTools/Source/C/GenFfs/GenFfs.c | 2 +-
+ BaseTools/Source/C/GenSec/GenSec.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/BaseTools/Source/C/GenFfs/GenFfs.c b/BaseTools/Source/C/GenFfs/GenFfs.c
+index 949025c33325..d78d62ab3689 100644
+--- a/BaseTools/Source/C/GenFfs/GenFfs.c
++++ b/BaseTools/Source/C/GenFfs/GenFfs.c
+@@ -542,7 +542,7 @@ GetAlignmentFromFile(char *InFile, UINT32 *Alignment)
+ PeFileBuffer = (UINT8 *) malloc (PeFileSize);
+ if (PeFileBuffer == NULL) {
+ fclose (InFileHandle);
+- Error(NULL, 0, 4001, "Resource", "memory cannot be allocated of %s", InFileHandle);
++ Error(NULL, 0, 4001, "Resource", "memory cannot be allocated for %s", InFile);
+ return EFI_OUT_OF_RESOURCES;
+ }
+ fread (PeFileBuffer, sizeof (UINT8), PeFileSize, InFileHandle);
+diff --git a/BaseTools/Source/C/GenSec/GenSec.c b/BaseTools/Source/C/GenSec/GenSec.c
+index d54a4f9e0a7d..b1d05367ec0b 100644
+--- a/BaseTools/Source/C/GenSec/GenSec.c
++++ b/BaseTools/Source/C/GenSec/GenSec.c
+@@ -1062,7 +1062,7 @@ GetAlignmentFromFile(char *InFile, UINT32 *Alignment)
+ PeFileBuffer = (UINT8 *) malloc (PeFileSize);
+ if (PeFileBuffer == NULL) {
+ fclose (InFileHandle);
+- Error(NULL, 0, 4001, "Resource", "memory cannot be allocated of %s", InFileHandle);
++ Error(NULL, 0, 4001, "Resource", "memory cannot be allocated for %s", InFile);
+ return EFI_OUT_OF_RESOURCES;
+ }
+ fread (PeFileBuffer, sizeof (UINT8), PeFileSize, InFileHandle);
diff --git a/meta/recipes-core/ovmf/ovmf/0001-Basetools-lzmaenc-fix-gcc12-warning.patch b/meta/recipes-core/ovmf/ovmf/0001-Basetools-lzmaenc-fix-gcc12-warning.patch
new file mode 100644
index 0000000000..a6ef87aa79
--- /dev/null
+++ b/meta/recipes-core/ovmf/ovmf/0001-Basetools-lzmaenc-fix-gcc12-warning.patch
@@ -0,0 +1,53 @@
+From 24551a99d1f765c891a4dc21a36f18ccbf56e612 Mon Sep 17 00:00:00 2001
+From: Steve Sakoman <steve@sakoman.com>
+Date: Tue, 10 Jan 2023 06:15:00 -1000
+Subject: [PATCH] BaseTools: fix gcc12 warning
+
+Sdk/C/LzmaEnc.c: In function ‘LzmaEnc_CodeOneMemBlock’:
+Sdk/C/LzmaEnc.c:2828:19: error: storing the address of local variable ‘outStream’ in ‘*p.rc.outStream’ [-Werror=dangling-pointer=]
+ 2828 | p->rc.outStream = &outStream.vt;
+ | ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
+Sdk/C/LzmaEnc.c:2811:28: note: ‘outStream’ declared here
+ 2811 | CLzmaEnc_SeqOutStreamBuf outStream;
+ | ^~~~~~~~~
+Sdk/C/LzmaEnc.c:2811:28: note: ‘pp’ declared here
+Sdk/C/LzmaEnc.c:2828:19: error: storing the address of local variable ‘outStream’ in ‘*(CLzmaEnc *)pp.rc.outStream’ [-Werror=dangling-pointer=]
+ 2828 | p->rc.outStream = &outStream.vt;
+ | ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
+Sdk/C/LzmaEnc.c:2811:28: note: ‘outStream’ declared here
+ 2811 | CLzmaEnc_SeqOutStreamBuf outStream;
+ | ^~~~~~~~~
+Sdk/C/LzmaEnc.c:2811:28: note: ‘pp’ declared here
+cc1: all warnings being treated as errors
+
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reviewed-by: Bob Feng <bob.c.feng@intel.com>
+
+Upstream-Status: Backport [https://github.com/tianocore/edk2/commit/85021f8cf22d1bd4114803c6c610dea5ef0059f1]
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+---
+ BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c b/BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c
+index e281716fee..b575c4f888 100644
+--- a/BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c
++++ b/BaseTools/Source/C/LzmaCompress/Sdk/C/LzmaEnc.c
+@@ -2638,12 +2638,13 @@ SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
+
+ nowPos64 = p->nowPos64;
+ RangeEnc_Init(&p->rc);
+- p->rc.outStream = &outStream.vt;
+
+ if (desiredPackSize == 0)
+ return SZ_ERROR_OUTPUT_EOF;
+
++ p->rc.outStream = &outStream.vt;
+ res = LzmaEnc_CodeOneBlock(p, desiredPackSize, *unpackSize);
++ p->rc.outStream = NULL;
+
+ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
+ *destLen -= outStream.rem;
+--
+2.25.1
+
diff --git a/meta/recipes-core/ovmf/ovmf/0001-Basetools-turn-off-gcc12-warning.patch b/meta/recipes-core/ovmf/ovmf/0001-Basetools-turn-off-gcc12-warning.patch
new file mode 100644
index 0000000000..73a432684c
--- /dev/null
+++ b/meta/recipes-core/ovmf/ovmf/0001-Basetools-turn-off-gcc12-warning.patch
@@ -0,0 +1,41 @@
+From 22130dcd98b4d4b76ac8d922adb4a2dbc86fa52c Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Thu, 24 Mar 2022 20:04:36 +0800
+Subject: [PATCH] Basetools: turn off gcc12 warning
+
+In function ‘SetDevicePathEndNode’,
+ inlined from ‘FileDevicePath’ at DevicePathUtilities.c:857:5:
+DevicePathUtilities.c:321:3: error: writing 4 bytes into a region of size 1 [-Werror=stringop-overflow=]
+ 321 | memcpy (Node, &mUefiDevicePathLibEndDevicePath, sizeof (mUefiDevicePathLibEndDevicePath));
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+In file included from UefiDevicePathLib.h:22,
+ from DevicePathUtilities.c:16:
+../Include/Protocol/DevicePath.h: In function ‘FileDevicePath’:
+../Include/Protocol/DevicePath.h:51:9: note: destination object ‘Type’ of size 1
+ 51 | UINT8 Type; ///< 0x01 Hardware Device Path.
+ | ^~~~
+
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reviewed-by: Bob Feng <bob.c.feng@intel.com>
+
+Upstream-Status: Backport [https://github.com/tianocore/edk2/commit/22130dcd98b4d4b76ac8d922adb4a2dbc86fa52c]
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+
+---
+ BaseTools/Source/C/DevicePath/GNUmakefile | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/BaseTools/Source/C/DevicePath/GNUmakefile b/BaseTools/Source/C/DevicePath/GNUmakefile
+index 7ca08af9662d..b05d2bddfa68 100644
+--- a/BaseTools/Source/C/DevicePath/GNUmakefile
++++ b/BaseTools/Source/C/DevicePath/GNUmakefile
+@@ -13,6 +13,9 @@ OBJECTS = DevicePath.o UefiDevicePathLib.o DevicePathFromText.o DevicePathUtili
+
+ include $(MAKEROOT)/Makefiles/app.makefile
+
++# gcc 12 trips over device path handling
++BUILD_CFLAGS += -Wno-error=stringop-overflow
++
+ LIBS = -lCommon
+ ifeq ($(CYGWIN), CYGWIN)
+ LIBS += -L/lib/e2fsprogs -luuid
diff --git a/meta/recipes-core/ovmf/ovmf_git.bb b/meta/recipes-core/ovmf/ovmf_git.bb
index b00119313b..a487f77e3c 100644
--- a/meta/recipes-core/ovmf/ovmf_git.bb
+++ b/meta/recipes-core/ovmf/ovmf_git.bb
@@ -18,6 +18,9 @@ SRC_URI = "gitsm://github.com/tianocore/edk2.git;branch=master;protocol=https \
file://0003-ovmf-enable-long-path-file.patch \
file://0004-ovmf-Update-to-latest.patch \
file://0001-Fix-VLA-parameter-warning.patch \
+ file://0001-Basetools-genffs-fix-gcc12-warning.patch \
+ file://0001-Basetools-lzmaenc-fix-gcc12-warning.patch \
+ file://0001-Basetools-turn-off-gcc12-warning.patch \
"
PV = "edk2-stable202008"
diff --git a/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb b/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
index 5ec3f6c927..5523f874db 100644
--- a/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
+++ b/meta/recipes-core/packagegroups/packagegroup-core-ssh-dropbear.bb
@@ -4,3 +4,4 @@ PR = "r1"
inherit packagegroup
RDEPENDS_${PN} = "dropbear"
+RRECOMMENDS_${PN} = "openssh-sftp-server"
diff --git a/meta/recipes-core/psplash/files/psplash-start.service b/meta/recipes-core/psplash/files/psplash-start.service
index 36c2bb38e0..bec9368427 100644
--- a/meta/recipes-core/psplash/files/psplash-start.service
+++ b/meta/recipes-core/psplash/files/psplash-start.service
@@ -2,6 +2,7 @@
Description=Start psplash boot splash screen
DefaultDependencies=no
RequiresMountsFor=/run
+ConditionFileIsExecutable=/usr/bin/psplash
[Service]
Type=notify
diff --git a/meta/recipes-core/psplash/files/psplash-systemd.service b/meta/recipes-core/psplash/files/psplash-systemd.service
index 082207f232..e93e3deb35 100644
--- a/meta/recipes-core/psplash/files/psplash-systemd.service
+++ b/meta/recipes-core/psplash/files/psplash-systemd.service
@@ -4,6 +4,7 @@ DefaultDependencies=no
After=psplash-start.service
Requires=psplash-start.service
RequiresMountsFor=/run
+ConditionFileIsExecutable=/usr/bin/psplash
[Service]
ExecStart=/usr/bin/psplash-systemd
diff --git a/meta/recipes-core/systemd/systemd-systemctl/systemctl b/meta/recipes-core/systemd/systemd-systemctl/systemctl
index 990de1ab39..e003c860e3 100755
--- a/meta/recipes-core/systemd/systemd-systemctl/systemctl
+++ b/meta/recipes-core/systemd/systemd-systemctl/systemctl
@@ -11,6 +11,7 @@ import re
import sys
from collections import namedtuple
+from itertools import chain
from pathlib import Path
version = 1.0
@@ -25,12 +26,16 @@ locations = list()
class SystemdFile():
"""Class representing a single systemd configuration file"""
- def __init__(self, root, path):
+ def __init__(self, root, path, instance_unit_name):
self.sections = dict()
self._parse(root, path)
dirname = os.path.basename(path.name) + ".d"
for location in locations:
- for path2 in sorted((root / location / "system" / dirname).glob("*.conf")):
+ files = (root / location / "system" / dirname).glob("*.conf")
+ if instance_unit_name:
+ inst_dirname = instance_unit_name + ".d"
+ files = chain(files, (root / location / "system" / inst_dirname).glob("*.conf"))
+ for path2 in sorted(files):
self._parse(root, path2)
def _parse(self, root, path):
@@ -177,12 +182,14 @@ class SystemdUnit():
raise SystemdUnitNotFoundError(self.root, unit)
- def _process_deps(self, config, service, location, prop, dirstem):
+ def _process_deps(self, config, service, location, prop, dirstem, instance):
systemdir = self.root / SYSCONFDIR / "systemd" / "system"
target = ROOT / location.relative_to(self.root)
try:
for dependent in config.get('Install', prop):
+ # expand any %i to instance (ignoring escape sequence %%)
+ dependent = re.sub("([^%](%%)*)%i", "\\g<1>{}".format(instance), dependent)
wants = systemdir / "{}.{}".format(dependent, dirstem) / service
add_link(wants, target)
@@ -193,8 +200,11 @@ class SystemdUnit():
# if we're enabling an instance, first extract the actual instance
# then figure out what the template unit is
template = re.match(r"[^@]+@(?P<instance>[^\.]*)\.", self.unit)
+ instance_unit_name = None
if template:
instance = template.group('instance')
+ if instance != "":
+ instance_unit_name = self.unit
unit = re.sub(r"@[^\.]*\.", "@.", self.unit, 1)
else:
instance = None
@@ -206,7 +216,7 @@ class SystemdUnit():
# ignore aliases
return
- config = SystemdFile(self.root, path)
+ config = SystemdFile(self.root, path, instance_unit_name)
if instance == "":
try:
default_instance = config.get('Install', 'DefaultInstance')[0]
@@ -219,8 +229,8 @@ class SystemdUnit():
else:
service = self.unit
- self._process_deps(config, service, path, 'WantedBy', 'wants')
- self._process_deps(config, service, path, 'RequiredBy', 'requires')
+ self._process_deps(config, service, path, 'WantedBy', 'wants', instance)
+ self._process_deps(config, service, path, 'RequiredBy', 'requires', instance)
try:
for also in config.get('Install', 'Also'):
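For clarity, a standalone sketch of how the %i substitution added to _process_deps() above behaves for a template instance; the unit names are hypothetical:

    import re

    instance = "ttyS0"
    for dependent in ("serial-getty@%i.service", "foo@%%i.service"):
        expanded = re.sub("([^%](%%)*)%i", "\\g<1>{}".format(instance), dependent)
        print(expanded)
    # -> serial-getty@ttyS0.service
    # -> foo@%%i.service (the %% escape sequence is left untouched)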
diff --git a/meta/recipes-core/systemd/systemd/00-create-volatile.conf b/meta/recipes-core/systemd/systemd/00-create-volatile.conf
index 87cbe1e7d3..c4277221a2 100644
--- a/meta/recipes-core/systemd/systemd/00-create-volatile.conf
+++ b/meta/recipes-core/systemd/systemd/00-create-volatile.conf
@@ -3,5 +3,6 @@
# inside /var/log.
+d /run/lock 1777 - - -
d /var/volatile/log - - - -
d /var/volatile/tmp 1777 - -
diff --git a/meta/recipes-core/systemd/systemd/CVE-2018-21029.patch b/meta/recipes-core/systemd/systemd/CVE-2018-21029.patch
new file mode 100644
index 0000000000..8d3801a248
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2018-21029.patch
@@ -0,0 +1,120 @@
+From 3f9d9289ee8730a81a0464539f4e1ba2d23d0ce9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= <joerg@thalheim.io>
+Date: Tue, 3 Mar 2020 23:31:25 +0000
+Subject: [PATCH] systemd-resolved: use hostname for certificate validation in
+ DoT
+
+Widely accepted certificates for IP addresses are expensive and only
+affordable for larger organizations. Therefore if the user provides
+the hostname in the DNS= option, we should use it instead of the IP
+address.
+
+(cherry picked from commit eec394f10bbfcc3d2fc8504ad8ff5be44231abd5)
+
+CVE: CVE-2018-21029
+Upstream-Status: Backport [ff26d281aec0877b43269f18c6282cd79a7f5529]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ man/resolved.conf.xml | 16 +++++++++++-----
+ src/resolve/resolved-dnstls-gnutls.c | 20 ++++++++++++--------
+ src/resolve/resolved-dnstls-openssl.c | 15 +++++++++++----
+ 3 files changed, 34 insertions(+), 17 deletions(-)
+
+diff --git a/man/resolved.conf.xml b/man/resolved.conf.xml
+index 818000145b..37161ebcbc 100644
+--- a/man/resolved.conf.xml
++++ b/man/resolved.conf.xml
+@@ -193,11 +193,17 @@
+ <varlistentry>
+ <term><varname>DNSOverTLS=</varname></term>
+ <listitem>
+- <para>Takes a boolean argument or <literal>opportunistic</literal>.
+- If true all connections to the server will be encrypted. Note that
+- this mode requires a DNS server that supports DNS-over-TLS and has
+- a valid certificate for it's IP. If the DNS server does not support
+- DNS-over-TLS all DNS requests will fail. When set to <literal>opportunistic</literal>
++ <para>Takes a boolean argument or <literal>opportunistic</literal>. If
++ true all connections to the server will be encrypted. Note that this
++ mode requires a DNS server that supports DNS-over-TLS and has a valid
++ certificate. If the hostname was specified in <varname>DNS=</varname>
++ by using the format <literal>address#server_name</literal> it
++ is used to validate its certificate and also to enable Server Name
++ Indication (SNI) when opening a TLS connection. Otherwise
++ the certificate is checked against the server's IP.
++ If the DNS server does not support DNS-over-TLS all DNS requests will fail.</para>
++
++ <para>When set to <literal>opportunistic</literal>
+ DNS request are attempted to send encrypted with DNS-over-TLS.
+ If the DNS server does not support TLS, DNS-over-TLS is disabled.
+ Note that this mode makes DNS-over-TLS vulnerable to "downgrade"
+diff --git a/src/resolve/resolved-dnstls-gnutls.c b/src/resolve/resolved-dnstls-gnutls.c
+index ed0a31e8bf..c7215723a7 100644
+--- a/src/resolve/resolved-dnstls-gnutls.c
++++ b/src/resolve/resolved-dnstls-gnutls.c
+@@ -56,15 +56,19 @@ int dnstls_stream_connect_tls(DnsStream *stream, DnsServer *server) {
+ }
+
+ if (server->manager->dns_over_tls_mode == DNS_OVER_TLS_YES) {
+- stream->dnstls_data.validation.type = GNUTLS_DT_IP_ADDRESS;
+- if (server->family == AF_INET) {
+- stream->dnstls_data.validation.data = (unsigned char*) &server->address.in.s_addr;
+- stream->dnstls_data.validation.size = 4;
+- } else {
+- stream->dnstls_data.validation.data = server->address.in6.s6_addr;
+- stream->dnstls_data.validation.size = 16;
++ if (server->server_name)
++ gnutls_session_set_verify_cert(gs, server->server_name, 0);
++ else {
++ stream->dnstls_data.validation.type = GNUTLS_DT_IP_ADDRESS;
++ if (server->family == AF_INET) {
++ stream->dnstls_data.validation.data = (unsigned char*) &server->address.in.s_addr;
++ stream->dnstls_data.validation.size = 4;
++ } else {
++ stream->dnstls_data.validation.data = server->address.in6.s6_addr;
++ stream->dnstls_data.validation.size = 16;
++ }
++ gnutls_session_set_verify_cert2(gs, &stream->dnstls_data.validation, 1, 0);
+ }
+- gnutls_session_set_verify_cert2(gs, &stream->dnstls_data.validation, 1, 0);
+ }
+
+ gnutls_handshake_set_timeout(gs, GNUTLS_DEFAULT_HANDSHAKE_TIMEOUT);
+diff --git a/src/resolve/resolved-dnstls-openssl.c b/src/resolve/resolved-dnstls-openssl.c
+index 85e202ff74..007aedaa5b 100644
+--- a/src/resolve/resolved-dnstls-openssl.c
++++ b/src/resolve/resolved-dnstls-openssl.c
+@@ -6,6 +6,7 @@
+
+ #include <openssl/bio.h>
+ #include <openssl/err.h>
++#include <openssl/x509v3.h>
+
+ #include "io-util.h"
+ #include "resolved-dns-stream.h"
+@@ -78,13 +79,19 @@ int dnstls_stream_connect_tls(DnsStream *stream, DnsServer *server) {
+
+ if (server->manager->dns_over_tls_mode == DNS_OVER_TLS_YES) {
+ X509_VERIFY_PARAM *v;
+- const unsigned char *ip;
+
+ SSL_set_verify(s, SSL_VERIFY_PEER, NULL);
+ v = SSL_get0_param(s);
+- ip = server->family == AF_INET ? (const unsigned char*) &server->address.in.s_addr : server->address.in6.s6_addr;
+- if (!X509_VERIFY_PARAM_set1_ip(v, ip, FAMILY_ADDRESS_SIZE(server->family)))
+- return -ECONNREFUSED;
++ if (server->server_name) {
++ X509_VERIFY_PARAM_set_hostflags(v, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
++ if (X509_VERIFY_PARAM_set1_host(v, server->server_name, 0) == 0)
++ return -ECONNREFUSED;
++ } else {
++ const unsigned char *ip;
++ ip = server->family == AF_INET ? (const unsigned char*) &server->address.in.s_addr : server->address.in6.s6_addr;
++ if (X509_VERIFY_PARAM_set1_ip(v, ip, FAMILY_ADDRESS_SIZE(server->family)) == 0)
++ return -ECONNREFUSED;
++ }
+ }
+
+ ERR_clear_error();
+--
+2.40.1
+
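In practice the backport makes the hostname form of DNS= usable for certificate validation; a small illustration (example values only, not taken from the patch) of how an address#server_name entry splits into the pieces the resolver uses:

    # resolved.conf could carry e.g. "DNS=1.1.1.1#cloudflare-dns.com" (example values).
    server = "1.1.1.1#cloudflare-dns.com"
    address, _, server_name = server.partition("#")
    # With server_name set, the patched code validates the certificate against the
    # hostname and enables SNI; otherwise it falls back to checking the IP address.
    print(address, server_name)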
diff --git a/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch b/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch
new file mode 100644
index 0000000000..f9c6704cfc
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2022-3821.patch
@@ -0,0 +1,47 @@
+From 9102c625a673a3246d7e73d8737f3494446bad4e Mon Sep 17 00:00:00 2001
+From: Yu Watanabe <watanabe.yu+github@gmail.com>
+Date: Thu, 7 Jul 2022 18:27:02 +0900
+Subject: [PATCH] time-util: fix buffer-over-run
+
+Fixes #23928.
+
+CVE: CVE-2022-3821
+Upstream-Status: Backport [https://github.com/systemd/systemd/commit/9102c625a673a3246d7e73d8737f3494446bad4e.patch]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: Both hunks refreshed for the backport
+
+---
+ src/basic/time-util.c | 2 +-
+ src/test/test-time-util.c | 5 +++++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/src/basic/time-util.c b/src/basic/time-util.c
+index abbc4ad5cd70..26d59de12348 100644
+--- a/src/basic/time-util.c
++++ b/src/basic/time-util.c
+@@ -514,7 +514,7 @@ char *format_timespan(char *buf, size_t
+ t = b;
+ }
+
+- n = MIN((size_t) k, l);
++ n = MIN((size_t) k, l-1);
+
+ l -= n;
+ p += n;
+diff --git a/src/test/test-time-util.c b/src/test/test-time-util.c
+index e8e4e2a67bb1..58c5fa9be40c 100644
+--- a/src/test/test-time-util.c
++++ b/src/test/test-time-util.c
+@@ -501,6 +501,12 @@ int main(int argc, char *argv[]) {
+ test_format_timespan(1);
+ test_format_timespan(USEC_PER_MSEC);
+ test_format_timespan(USEC_PER_SEC);
++
++ /* See issue #23928. */
++ _cleanup_free_ char *buf;
++ assert_se(buf = new(char, 5));
++ assert_se(buf == format_timespan(buf, 5, 100005, 1000));
++
+ test_timezone_is_valid();
+ test_get_timezones();
+ test_usec_add();
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-26604-1.patch b/meta/recipes-core/systemd/systemd/CVE-2023-26604-1.patch
new file mode 100644
index 0000000000..39f9480cf8
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-26604-1.patch
@@ -0,0 +1,115 @@
+From 612ebf6c913dd0e4197c44909cb3157f5c51a2f0 Mon Sep 17 00:00:00 2001
+From: Lennart Poettering <lennart@poettering.net>
+Date: Mon, 31 Aug 2020 19:37:13 +0200
+Subject: [PATCH] pager: set $LESSSECURE whenever we invoke a pager
+
+Some extra safety when invoked via "sudo". With this we address a
+genuine design flaw of sudo, and we shouldn't need to deal with this.
+But it's still a good idea to disable this surface given how exotic it
+is.
+
+Prompted by #5666
+
+CVE: CVE-2023-26604
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/17270/commits/612ebf6c913dd0e4197c44909cb3157f5c51a2f0]
+Comments: Hunk not refreshed
+Signed-off-by: rajmohan r <rajmohan.r@kpit.com>
+---
+ man/less-variables.xml | 9 +++++++++
+ man/systemctl.xml | 1 +
+ man/systemd.xml | 1 +
+ src/shared/pager.c | 23 +++++++++++++++++++++--
+ 4 files changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/man/less-variables.xml b/man/less-variables.xml
+index 08e513c99f8e..c52511ca8e18 100644
+--- a/man/less-variables.xml
++++ b/man/less-variables.xml
+@@ -64,6 +64,15 @@
+ the invoking terminal is determined to be UTF-8 compatible).</para></listitem>
+ </varlistentry>
+
++ <varlistentry id='lesssecure'>
++ <term><varname>$SYSTEMD_LESSSECURE</varname></term>
++
++ <listitem><para>Takes a boolean argument. Overrides the <varname>$LESSSECURE</varname> environment
++ variable when invoking the pager, which controls the "secure" mode of less (which disables commands
++ such as <literal>|</literal> which allow to easily shell out to external command lines). By default
++ less secure mode is enabled, with this setting it may be disabled.</para></listitem>
++ </varlistentry>
++
+ <varlistentry id='colors'>
+ <term><varname>$SYSTEMD_COLORS</varname></term>
+
+diff --git a/man/systemctl.xml b/man/systemctl.xml
+index 1c5502883700..a3f0c3041a57 100644
+--- a/man/systemctl.xml
++++ b/man/systemctl.xml
+@@ -2240,6 +2240,7 @@ Jan 12 10:46:45 example.com bluetoothd[8900]: gatt-time-server: Input/output err
+ <xi:include href="less-variables.xml" xpointer="pager"/>
+ <xi:include href="less-variables.xml" xpointer="less"/>
+ <xi:include href="less-variables.xml" xpointer="lesscharset"/>
++ <xi:include href="less-variables.xml" xpointer="lesssecure"/>
+ <xi:include href="less-variables.xml" xpointer="colors"/>
+ <xi:include href="less-variables.xml" xpointer="urlify"/>
+ </refsect1>
+diff --git a/man/systemd.xml b/man/systemd.xml
+index a9040545c2ab..c92cfef77689 100644
+--- a/man/systemd.xml
++++ b/man/systemd.xml
+@@ -692,6 +692,7 @@
+ <xi:include href="less-variables.xml" xpointer="pager"/>
+ <xi:include href="less-variables.xml" xpointer="less"/>
+ <xi:include href="less-variables.xml" xpointer="lesscharset"/>
++ <xi:include href="less-variables.xml" xpointer="lesssecure"/>
+ <xi:include href="less-variables.xml" xpointer="colors"/>
+ <xi:include href="less-variables.xml" xpointer="urlify"/>
+
+diff --git a/src/shared/pager.c b/src/shared/pager.c
+index e03be6d23b2d..9c21881241f5 100644
+--- a/src/shared/pager.c
++++ b/src/shared/pager.c
+@@ -9,6 +9,7 @@
+ #include <unistd.h>
+
+ #include "copy.h"
++#include "env-util.h"
+ #include "fd-util.h"
+ #include "fileio.h"
+ #include "io-util.h"
+@@ -152,8 +153,7 @@ int pager_open(PagerFlags flags) {
+ _exit(EXIT_FAILURE);
+ }
+
+- /* Initialize a good charset for less. This is
+- * particularly important if we output UTF-8
++ /* Initialize a good charset for less. This is particularly important if we output UTF-8
+ * characters. */
+ less_charset = getenv("SYSTEMD_LESSCHARSET");
+ if (!less_charset && is_locale_utf8())
+@@ -164,6 +164,25 @@ int pager_open(PagerFlags flags) {
+ _exit(EXIT_FAILURE);
+ }
+
++ /* People might invoke us from sudo, don't needlessly allow less to be a way to shell out
++ * privileged stuff. */
++ r = getenv_bool("SYSTEMD_LESSSECURE");
++ if (r == 0) { /* Remove env var if off */
++ if (unsetenv("LESSSECURE") < 0) {
++ log_error_errno(errno, "Failed to uset environment variable LESSSECURE: %m");
++ _exit(EXIT_FAILURE);
++ }
++ } else {
++ /* Set env var otherwise */
++ if (r < 0)
++ log_warning_errno(r, "Unable to parse $SYSTEMD_LESSSECURE, ignoring: %m");
++
++ if (setenv("LESSSECURE", "1", 1) < 0) {
++ log_error_errno(errno, "Failed to set environment variable LESSSECURE: %m");
++ _exit(EXIT_FAILURE);
++ }
++ }
++
+ if (pager_args) {
+ r = loop_write(exe_name_pipe[1], pager_args[0], strlen(pager_args[0]) + 1, false);
+ if (r < 0) {
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-26604-2.patch b/meta/recipes-core/systemd/systemd/CVE-2023-26604-2.patch
new file mode 100644
index 0000000000..95da7cfad6
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-26604-2.patch
@@ -0,0 +1,264 @@
+From 1b5b507cd2d1d7a2b053151abb548475ad9c5c3b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Mon, 12 Oct 2020 18:57:32 +0200
+Subject: [PATCH] test-login: always test sd_pid_get_owner_uid(), modernize
+
+For a long time some functions only worked when in a session, and the test
+didn't execute them when sd_pid_get_session() failed. Let's always call
+them to increase coverage.
+
+While at it, let's test for ==0 not >=0 where we don't expect the function
+to return anything except 0 or error.
+
+CVE: CVE-2023-26604
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/17270/commits/1b5b507cd2d1d7a2b053151abb548475ad9c5c3b.patch]
+Comments: Hunk not refreshed
+Signed-off-by: rajmohan r <rajmohan.r@kpit.com>
+---
+ src/libsystemd/sd-login/test-login.c | 131 ++++++++++++++-------------
+ 1 file changed, 70 insertions(+), 61 deletions(-)
+
+diff --git a/src/libsystemd/sd-login/test-login.c b/src/libsystemd/sd-login/test-login.c
+index c0c77e04714b..0494fc77ba18 100644
+--- a/src/libsystemd/sd-login/test-login.c
++++ b/src/libsystemd/sd-login/test-login.c
+@@ -5,21 +5,22 @@
+ #include "sd-login.h"
+
+ #include "alloc-util.h"
++#include "errno-list.h"
+ #include "fd-util.h"
+ #include "format-util.h"
+ #include "log.h"
+ #include "string-util.h"
+ #include "strv.h"
+ #include "time-util.h"
+-#include "util.h"
++#include "user-util.h"
+
+ static char* format_uids(char **buf, uid_t* uids, int count) {
+- int pos = 0, k, inc;
++ int pos = 0, inc;
+ size_t size = (DECIMAL_STR_MAX(uid_t) + 1) * count + 1;
+
+ assert_se(*buf = malloc(size));
+
+- for (k = 0; k < count; k++) {
++ for (int k = 0; k < count; k++) {
+ sprintf(*buf + pos, "%s"UID_FMT"%n", k > 0 ? " " : "", uids[k], &inc);
+ pos += inc;
+ }
+@@ -30,6 +31,10 @@ static char* format_uids(char **buf, uid_t* uids, int count) {
+ return *buf;
+ }
+
++static const char *e(int r) {
++ return r == 0 ? "OK" : errno_to_name(r);
++}
++
+ static void test_login(void) {
+ _cleanup_close_pair_ int pair[2] = { -1, -1 };
+ _cleanup_free_ char *pp = NULL, *qq = NULL,
+@@ -39,65 +44,71 @@ static void test_login(void) {
+ *seat = NULL, *session = NULL,
+ *unit = NULL, *user_unit = NULL, *slice = NULL;
+ int r;
+- uid_t u, u2;
+- char *t, **seats, **sessions;
++ uid_t u, u2 = UID_INVALID;
++ char *t, **seats = NULL, **sessions = NULL;
+
+ r = sd_pid_get_unit(0, &unit);
+- assert_se(r >= 0 || r == -ENODATA);
+- log_info("sd_pid_get_unit(0, …) → \"%s\"", strna(unit));
++ log_info("sd_pid_get_unit(0, …) → %s / \"%s\"", e(r), strnull(unit));
++ assert_se(IN_SET(r, 0, -ENODATA));
+
+ r = sd_pid_get_user_unit(0, &user_unit);
+- assert_se(r >= 0 || r == -ENODATA);
+- log_info("sd_pid_get_user_unit(0, …) → \"%s\"", strna(user_unit));
++ log_info("sd_pid_get_user_unit(0, …) → %s / \"%s\"", e(r), strnull(user_unit));
++ assert_se(IN_SET(r, 0, -ENODATA));
+
+ r = sd_pid_get_slice(0, &slice);
+- assert_se(r >= 0 || r == -ENODATA);
+- log_info("sd_pid_get_slice(0, …) → \"%s\"", strna(slice));
++ log_info("sd_pid_get_slice(0, …) → %s / \"%s\"", e(r), strnull(slice));
++ assert_se(IN_SET(r, 0, -ENODATA));
++
++ r = sd_pid_get_owner_uid(0, &u2);
++ log_info("sd_pid_get_owner_uid(0, …) → %s / "UID_FMT, e(r), u2);
++ assert_se(IN_SET(r, 0, -ENODATA));
+
+ r = sd_pid_get_session(0, &session);
+- if (r < 0) {
+- log_warning_errno(r, "sd_pid_get_session(0, …): %m");
+- if (r == -ENODATA)
+- log_info("Seems we are not running in a session, skipping some tests.");
+- } else {
+- log_info("sd_pid_get_session(0, …) → \"%s\"", session);
+-
+- assert_se(sd_pid_get_owner_uid(0, &u2) == 0);
+- log_info("sd_pid_get_owner_uid(0, …) → "UID_FMT, u2);
+-
+- assert_se(sd_pid_get_cgroup(0, &cgroup) == 0);
+- log_info("sd_pid_get_cgroup(0, …) → \"%s\"", cgroup);
+-
+- r = sd_uid_get_display(u2, &display_session);
+- assert_se(r >= 0 || r == -ENODATA);
+- log_info("sd_uid_get_display("UID_FMT", …) → \"%s\"",
+- u2, strnull(display_session));
+-
+- assert_se(socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == 0);
+- sd_peer_get_session(pair[0], &pp);
+- sd_peer_get_session(pair[1], &qq);
+- assert_se(streq_ptr(pp, qq));
+-
+- r = sd_uid_get_sessions(u2, false, &sessions);
++ log_info("sd_pid_get_session(0, …) → %s / \"%s\"", e(r), strnull(session));
++
++ r = sd_pid_get_cgroup(0, &cgroup);
++ log_info("sd_pid_get_cgroup(0, …) → %s / \"%s\"", e(r), strnull(cgroup));
++ assert_se(r == 0);
++
++ r = sd_uid_get_display(u2, &display_session);
++ log_info("sd_uid_get_display("UID_FMT", …) → %s / \"%s\"", u2, e(r), strnull(display_session));
++ if (u2 == UID_INVALID)
++ assert_se(r == -EINVAL);
++ else
++ assert_se(IN_SET(r, 0, -ENODATA));
++
++ assert_se(socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == 0);
++ sd_peer_get_session(pair[0], &pp);
++ sd_peer_get_session(pair[1], &qq);
++ assert_se(streq_ptr(pp, qq));
++
++ r = sd_uid_get_sessions(u2, false, &sessions);
++ assert_se(t = strv_join(sessions, " "));
++ log_info("sd_uid_get_sessions("UID_FMT", …) → %s \"%s\"", u2, e(r), t);
++ if (u2 == UID_INVALID)
++ assert_se(r == -EINVAL);
++ else {
+ assert_se(r >= 0);
+ assert_se(r == (int) strv_length(sessions));
+- assert_se(t = strv_join(sessions, " "));
+- strv_free(sessions);
+- log_info("sd_uid_get_sessions("UID_FMT", …) → [%i] \"%s\"", u2, r, t);
+- free(t);
++ }
++ sessions = strv_free(sessions);
++ free(t);
+
+- assert_se(r == sd_uid_get_sessions(u2, false, NULL));
++ assert_se(r == sd_uid_get_sessions(u2, false, NULL));
+
+- r = sd_uid_get_seats(u2, false, &seats);
++ r = sd_uid_get_seats(u2, false, &seats);
++ assert_se(t = strv_join(seats, " "));
++ log_info("sd_uid_get_seats("UID_FMT", …) → %s \"%s\"", u2, e(r), t);
++ if (u2 == UID_INVALID)
++ assert_se(r == -EINVAL);
++ else {
+ assert_se(r >= 0);
+ assert_se(r == (int) strv_length(seats));
+- assert_se(t = strv_join(seats, " "));
+- strv_free(seats);
+- log_info("sd_uid_get_seats("UID_FMT", …) → [%i] \"%s\"", u2, r, t);
+- free(t);
+-
+- assert_se(r == sd_uid_get_seats(u2, false, NULL));
+ }
++ seats = strv_free(seats);
++ free(t);
++
++ assert_se(r == sd_uid_get_seats(u2, false, NULL));
+
+ if (session) {
+ r = sd_session_is_active(session);
+@@ -109,7 +120,7 @@ static void test_login(void) {
+ log_info("sd_session_is_remote(\"%s\") → %s", session, yes_no(r));
+
+ r = sd_session_get_state(session, &state);
+- assert_se(r >= 0);
++ assert_se(r == 0);
+ log_info("sd_session_get_state(\"%s\") → \"%s\"", session, state);
+
+ assert_se(sd_session_get_uid(session, &u) >= 0);
+@@ -123,16 +134,16 @@ static void test_login(void) {
+ log_info("sd_session_get_class(\"%s\") → \"%s\"", session, class);
+
+ r = sd_session_get_display(session, &display);
+- assert_se(r >= 0 || r == -ENODATA);
++ assert_se(IN_SET(r, 0, -ENODATA));
+ log_info("sd_session_get_display(\"%s\") → \"%s\"", session, strna(display));
+
+ r = sd_session_get_remote_user(session, &remote_user);
+- assert_se(r >= 0 || r == -ENODATA);
++ assert_se(IN_SET(r, 0, -ENODATA));
+ log_info("sd_session_get_remote_user(\"%s\") → \"%s\"",
+ session, strna(remote_user));
+
+ r = sd_session_get_remote_host(session, &remote_host);
+- assert_se(r >= 0 || r == -ENODATA);
++ assert_se(IN_SET(r, 0, -ENODATA));
+ log_info("sd_session_get_remote_host(\"%s\") → \"%s\"",
+ session, strna(remote_host));
+
+@@ -161,7 +172,7 @@ static void test_login(void) {
+ assert_se(r == -ENODATA);
+ }
+
+- assert_se(sd_uid_get_state(u, &state2) >= 0);
++ assert_se(sd_uid_get_state(u, &state2) == 0);
+ log_info("sd_uid_get_state("UID_FMT", …) → %s", u, state2);
+ }
+
+@@ -173,11 +184,11 @@ static void test_login(void) {
+ assert_se(sd_uid_is_on_seat(u, 0, seat) > 0);
+
+ r = sd_seat_get_active(seat, &session2, &u2);
+- assert_se(r >= 0);
++ assert_se(r == 0);
+ log_info("sd_seat_get_active(\"%s\", …) → \"%s\", "UID_FMT, seat, session2, u2);
+
+ r = sd_uid_is_on_seat(u, 1, seat);
+- assert_se(r >= 0);
++ assert_se(IN_SET(r, 0, 1));
+ assert_se(!!r == streq(session, session2));
+
+ r = sd_seat_get_sessions(seat, &sessions, &uids, &n);
+@@ -185,8 +196,8 @@ static void test_login(void) {
+ assert_se(r == (int) strv_length(sessions));
+ assert_se(t = strv_join(sessions, " "));
+ strv_free(sessions);
+- log_info("sd_seat_get_sessions(\"%s\", …) → %i, \"%s\", [%i] {%s}",
+- seat, r, t, n, format_uids(&buf, uids, n));
++ log_info("sd_seat_get_sessions(\"%s\", …) → %s, \"%s\", [%u] {%s}",
++ seat, e(r), t, n, format_uids(&buf, uids, n));
+ free(t);
+
+ assert_se(sd_seat_get_sessions(seat, NULL, NULL, NULL) == r);
+@@ -204,7 +215,7 @@ static void test_login(void) {
+
+ r = sd_seat_get_active(NULL, &t, NULL);
+ assert_se(IN_SET(r, 0, -ENODATA));
+- log_info("sd_seat_get_active(NULL, …) (active session on current seat) → %s", strnull(t));
++ log_info("sd_seat_get_active(NULL, …) (active session on current seat) → %s / \"%s\"", e(r), strnull(t));
+ free(t);
+
+ r = sd_get_sessions(&sessions);
+@@ -244,13 +255,11 @@ static void test_login(void) {
+
+ static void test_monitor(void) {
+ sd_login_monitor *m = NULL;
+- unsigned n;
+ int r;
+
+- r = sd_login_monitor_new("session", &m);
+- assert_se(r >= 0);
++ assert_se(sd_login_monitor_new("session", &m) == 0);
+
+- for (n = 0; n < 5; n++) {
++ for (unsigned n = 0; n < 5; n++) {
+ struct pollfd pollfd = {};
+ usec_t timeout, nw;
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-26604-3.patch b/meta/recipes-core/systemd/systemd/CVE-2023-26604-3.patch
new file mode 100644
index 0000000000..f02f62b772
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-26604-3.patch
@@ -0,0 +1,182 @@
+From 0a42426d797406b4b01a0d9c13bb759c2629d108 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Wed, 7 Oct 2020 11:15:05 +0200
+Subject: [PATCH] pager: make pager secure when under euid is changed or
+ explicitly requested
+
+The variable is renamed to SYSTEMD_PAGERSECURE (because it's not just about
+less now), and we automatically enable secure mode in certain cases, but not
+otherwise.
+
+This approach is more nuanced, but should provide a better experience for
+users:
+
+- Previously we would set LESSSECURE=1 and trust the pager to make use of
+ it. But this has an effect only on less. We need to not start pagers which
+ are insecure when in secure mode. In particular more is like that and is a
+ very popular pager.
+
+- We don't enable secure mode always, which means that those other pagers can
+ reasonably used.
+
+- We do the right thing by default, but the user has ultimate control by
+ setting SYSTEMD_PAGERSECURE.
+
+Fixes #5666.
+
+v2:
+- also check $PKEXEC_UID
+
+v3:
+- use 'sd_pid_get_owner_uid() != geteuid()' as the condition
+
+CVE: CVE-2023-26604
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/17270/commits/0a42426d797406b4b01a0d9c13bb759c2629d108]
+Comments: Hunk refreshed
+Signed-off-by: rajmohan r <rajmohan.r@kpit.com>
+---
+ man/less-variables.xml | 30 +++++++++++++++----
+ src/shared/pager.c | 63 ++++++++++++++++++++++++++-------------
+ 2 files changed, 66 insertions(+), 27 deletions(-)
+
+diff --git a/man/less-variables.xml b/man/less-variables.xml
+index c52511c..049e9f7 100644
+--- a/man/less-variables.xml
++++ b/man/less-variables.xml
+@@ -65,12 +65,30 @@
+ </varlistentry>
+
+ <varlistentry id='lesssecure'>
+- <term><varname>$SYSTEMD_LESSSECURE</varname></term>
+-
+- <listitem><para>Takes a boolean argument. Overrides the <varname>$LESSSECURE</varname> environment
+- variable when invoking the pager, which controls the "secure" mode of less (which disables commands
+- such as <literal>|</literal> which allow to easily shell out to external command lines). By default
+- less secure mode is enabled, with this setting it may be disabled.</para></listitem>
++ <term><varname>$SYSTEMD_PAGERSECURE</varname></term>
++
++ <listitem><para>Takes a boolean argument. When true, the "secure" mode of the pager is enabled; if
++ false, disabled. If <varname>$SYSTEMD_PAGERSECURE</varname> is not set at all, secure mode is enabled
++ if the effective UID is not the same as the owner of the login session, see <citerefentry
++ project='man-pages'><refentrytitle>geteuid</refentrytitle><manvolnum>2</manvolnum></citerefentry> and
++ <citerefentry><refentrytitle>sd_pid_get_owner_uid</refentrytitle><manvolnum>3</manvolnum></citerefentry>.
++ In secure mode, <option>LESSSECURE=1</option> will be set when invoking the pager, and the pager shall
++ disable commands that open or create new files or start new subprocesses. When
++ <varname>$SYSTEMD_PAGERSECURE</varname> is not set at all, pagers which are not known to implement
++ secure mode will not be used. (Currently only
++ <citerefentry><refentrytitle>less</refentrytitle><manvolnum>1</manvolnum></citerefentry> implements
++ secure mode.)</para>
++
++ <para>Note: when commands are invoked with elevated privileges, for example under <citerefentry
++ project='man-pages'><refentrytitle>sudo</refentrytitle><manvolnum>8</manvolnum></citerefentry> or
++ <citerefentry
++ project='die-net'><refentrytitle>pkexec</refentrytitle><manvolnum>1</manvolnum></citerefentry>, care
++ must be taken to ensure that unintended interactive features are not enabled. "Secure" mode for the
++ pager may be enabled automatically as described above. Setting <varname>SYSTEMD_PAGERSECURE=0</varname>
++ or not removing it from the inherited environment allows the user to invoke arbitrary commands. Note
++ that if the <varname>$SYSTEMD_PAGER</varname> or <varname>$PAGER</varname> variables are to be
++ honoured, <varname>$SYSTEMD_PAGERSECURE</varname> must be set too. It might be reasonable to completely
++ disable the pager using <option>--no-pager</option> instead.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id='colors'>
+diff --git a/src/shared/pager.c b/src/shared/pager.c
+index a3b6576..a72d9ea 100644
+--- a/src/shared/pager.c
++++ b/src/shared/pager.c
+@@ -8,6 +8,8 @@
+ #include <sys/prctl.h>
+ #include <unistd.h>
+
++#include "sd-login.h"
++
+ #include "copy.h"
+ #include "env-util.h"
+ #include "fd-util.h"
+@@ -164,25 +166,42 @@ int pager_open(PagerFlags flags) {
+ }
+
+ /* People might invoke us from sudo, don't needlessly allow less to be a way to shell out
+- * privileged stuff. */
+- r = getenv_bool("SYSTEMD_LESSSECURE");
+- if (r == 0) { /* Remove env var if off */
+- if (unsetenv("LESSSECURE") < 0) {
+- log_error_errno(errno, "Failed to uset environment variable LESSSECURE: %m");
+- _exit(EXIT_FAILURE);
+- }
+- } else {
+- /* Set env var otherwise */
++ * privileged stuff. If the user set $SYSTEMD_PAGERSECURE, trust their configuration of the
++ * pager. If they didn't, use secure mode when under euid is changed. If $SYSTEMD_PAGERSECURE
++ * wasn't explicitly set, and we autodetect the need for secure mode, only use the pager we
++ * know to be good. */
++ int use_secure_mode = getenv_bool("SYSTEMD_PAGERSECURE");
++ bool trust_pager = use_secure_mode >= 0;
++ if (use_secure_mode == -ENXIO) {
++ uid_t uid;
++
++ r = sd_pid_get_owner_uid(0, &uid);
+ if (r < 0)
+- log_warning_errno(r, "Unable to parse $SYSTEMD_LESSSECURE, ignoring: %m");
++ log_debug_errno(r, "sd_pid_get_owner_uid() failed, enabling pager secure mode: %m");
+
+- if (setenv("LESSSECURE", "1", 1) < 0) {
+- log_error_errno(errno, "Failed to set environment variable LESSSECURE: %m");
+- _exit(EXIT_FAILURE);
+- }
++ use_secure_mode = r < 0 || uid != geteuid();
++
++ } else if (use_secure_mode < 0) {
++ log_warning_errno(use_secure_mode, "Unable to parse $SYSTEMD_PAGERSECURE, assuming true: %m");
++ use_secure_mode = true;
+ }
+
+- if (pager_args) {
++ /* We generally always set variables used by less, even if we end up using a different pager.
++ * They shouldn't hurt in any case, and ideally other pagers would look at them too. */
++ if (use_secure_mode)
++ r = setenv("LESSSECURE", "1", 1);
++ else
++ r = unsetenv("LESSSECURE");
++ if (r < 0) {
++ log_error_errno(errno, "Failed to adjust environment variable LESSSECURE: %m");
++ _exit(EXIT_FAILURE);
++ }
++
++ if (trust_pager && pager_args) { /* The pager config might be set globally, and we cannot
++ * know if the user adjusted it to be appropriate for the
++ * secure mode. Thus, start the pager specified through
++ * envvars only when $SYSTEMD_PAGERSECURE was explicitly set
++ * as well. */
+ r = loop_write(exe_name_pipe[1], pager_args[0], strlen(pager_args[0]) + 1, false);
+ if (r < 0) {
+ log_error_errno(r, "Failed to write pager name to socket: %m");
+@@ -194,13 +213,14 @@ int pager_open(PagerFlags flags) {
+ "Failed to execute '%s', using fallback pagers: %m", pager_args[0]);
+ }
+
+- /* Debian's alternatives command for pagers is
+- * called 'pager'. Note that we do not call
+- * sensible-pagers here, since that is just a
+- * shell script that implements a logic that
+- * is similar to this one anyway, but is
+- * Debian-specific. */
++ /* Debian's alternatives command for pagers is called 'pager'. Note that we do not call
++ * sensible-pagers here, since that is just a shell script that implements a logic that is
++ * similar to this one anyway, but is Debian-specific. */
+ FOREACH_STRING(exe, "pager", "less", "more") {
++ /* Only less implements secure mode right now. */
++ if (use_secure_mode && !streq(exe, "less"))
++ continue;
++
+ r = loop_write(exe_name_pipe[1], exe, strlen(exe) + 1, false);
+ if (r < 0) {
+ log_error_errno(r, "Failed to write pager name to socket: %m");
+@@ -211,6 +231,7 @@ int pager_open(PagerFlags flags) {
+ "Failed to execute '%s', using next fallback pager: %m", exe);
+ }
+
++ /* Our builtin is also very secure. */
+ r = loop_write(exe_name_pipe[1], "(built-in)", strlen("(built-in)") + 1, false);
+ if (r < 0) {
+ log_error_errno(r, "Failed to write pager name to socket: %m");
diff --git a/meta/recipes-core/systemd/systemd/CVE-2023-26604-4.patch b/meta/recipes-core/systemd/systemd/CVE-2023-26604-4.patch
new file mode 100644
index 0000000000..bc6b0a91c2
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/CVE-2023-26604-4.patch
@@ -0,0 +1,32 @@
+From b8f736b30e20a2b44e7c34bb4e43b0d97ae77e3c Mon Sep 17 00:00:00 2001
+From: Lennart Poettering <lennart@poettering.net>
+Date: Thu, 15 Oct 2020 10:54:48 +0200
+Subject: [PATCH] pager: lets check SYSTEMD_PAGERSECURE with secure_getenv()
+
+I can't think of any real vulnerability about this, but it still feels
+better to check a variable with "secure" in its name with
+secure_getenv() rather than plain getenv().
+
+Paranoia FTW!
+
+CVE: CVE-2023-26604
+Upstream-Status: Backport [https://github.com/systemd/systemd/pull/17359/commits/b8f736b30e20a2b44e7c34bb4e43b0d97ae77e3c]
+Comments: Hunk refreshed
+Signed-off-by: rajmohan r <rajmohan.r@kpit.com>
+---
+ src/shared/pager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/shared/pager.c b/src/shared/pager.c
+index a72d9ea..250519c 100644
+--- a/src/shared/pager.c
++++ b/src/shared/pager.c
+@@ -170,7 +170,7 @@ int pager_open(PagerFlags flags) {
+ * pager. If they didn't, use secure mode when under euid is changed. If $SYSTEMD_PAGERSECURE
+ * wasn't explicitly set, and we autodetect the need for secure mode, only use the pager we
+ * know to be good. */
+- int use_secure_mode = getenv_bool("SYSTEMD_PAGERSECURE");
++ int use_secure_mode = getenv_bool_secure("SYSTEMD_PAGERSECURE");
+ bool trust_pager = use_secure_mode >= 0;
+ if (use_secure_mode == -ENXIO) {
+ uid_t uid;
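Taken together, the pager hardening patches above implement roughly the following decision, shown here as a Python paraphrase (an approximation, not the C implementation): an explicit $SYSTEMD_PAGERSECURE wins, otherwise secure mode is enabled whenever the login-session owner cannot be determined or differs from the effective UID:

    import os

    def pager_secure_mode(session_owner_uid):
        # getenv_bool_secure("SYSTEMD_PAGERSECURE") is approximated with a plain lookup.
        val = os.environ.get("SYSTEMD_PAGERSECURE")
        if val is not None:
            return val not in ("0", "no", "false", "off")
        # No explicit setting: be secure if ownership is unknown or if the euid
        # differs from the session owner (e.g. under sudo or pkexec).
        return session_owner_uid is None or session_owner_uid != os.geteuid()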
diff --git a/meta/recipes-core/systemd/systemd/systemd-pager.sh b/meta/recipes-core/systemd/systemd/systemd-pager.sh
new file mode 100644
index 0000000000..86e3e0ab78
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/systemd-pager.sh
@@ -0,0 +1,7 @@
+# Systemd expects a color capable pager, however the less provided
+# by busybox is not. This makes many interactions with systemd pretty
+# annoying. As a workaround we disable the systemd pager if less
+# is not the GNU version.
+if ! less -V > /dev/null 2>&1 ; then
+ export SYSTEMD_PAGER=
+fi
diff --git a/meta/recipes-core/systemd/systemd_244.5.bb b/meta/recipes-core/systemd/systemd_244.5.bb
index a648272bc0..8b2f47b92f 100644
--- a/meta/recipes-core/systemd/systemd_244.5.bb
+++ b/meta/recipes-core/systemd/systemd_244.5.bb
@@ -18,6 +18,7 @@ SRC_URI += "file://touchscreen.rules \
file://00-create-volatile.conf \
file://init \
file://99-default.preset \
+ file://systemd-pager.sh \
file://0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch \
file://0003-implment-systemd-sysv-install-for-OE.patch \
file://CVE-2021-33910.patch \
@@ -30,9 +31,15 @@ SRC_URI += "file://touchscreen.rules \
file://network-fix-Link-reference-counter-issue.patch \
file://rm-rf-refactor-rm-rf-children-split-out-body-of-directory.patch \
file://rm-rf-optionally-fsync-after-removing-directory-tree.patch \
+ file://CVE-2018-21029.patch \
file://CVE-2021-3997-1.patch \
file://CVE-2021-3997-2.patch \
file://CVE-2021-3997-3.patch \
+ file://CVE-2022-3821.patch \
+ file://CVE-2023-26604-1.patch \
+ file://CVE-2023-26604-2.patch \
+ file://CVE-2023-26604-3.patch \
+ file://CVE-2023-26604-4.patch \
"
# patches needed by musl
@@ -162,6 +169,7 @@ PACKAGECONFIG[manpages] = "-Dman=true,-Dman=false,libxslt-native xmlto-native do
PACKAGECONFIG[microhttpd] = "-Dmicrohttpd=true,-Dmicrohttpd=false,libmicrohttpd"
PACKAGECONFIG[myhostname] = "-Dnss-myhostname=true,-Dnss-myhostname=false,,libnss-myhostname"
PACKAGECONFIG[networkd] = "-Dnetworkd=true,-Dnetworkd=false"
+PACKAGECONFIG[no-dns-fallback] = "-Ddns-servers="
PACKAGECONFIG[nss] = "-Dnss-systemd=true,-Dnss-systemd=false"
PACKAGECONFIG[nss-mymachines] = "-Dnss-mymachines=true,-Dnss-mymachines=false"
PACKAGECONFIG[nss-resolve] = "-Dnss-resolve=true,-Dnss-resolve=false"
@@ -212,7 +220,7 @@ rootlibexecdir = "${rootprefix}/lib"
EXTRA_OEMESON += "-Dlink-udev-shared=false"
EXTRA_OEMESON += "-Dnobody-user=nobody \
- -Dnobody-group=nobody \
+ -Dnobody-group=nogroup \
-Drootlibdir=${rootlibdir} \
-Drootprefix=${rootprefix} \
-Ddefault-locale=C \
@@ -315,6 +323,9 @@ do_install() {
# install default policy for presets
# https://www.freedesktop.org/wiki/Software/systemd/Preset/#howto
install -Dm 0644 ${WORKDIR}/99-default.preset ${D}${systemd_unitdir}/system-preset/99-default.preset
+
+ # add a profile fragment to disable systemd pager with busybox less
+ install -Dm 0644 ${WORKDIR}/systemd-pager.sh ${D}${sysconfdir}/profile.d/systemd-pager.sh
}
python populate_packages_prepend (){
@@ -402,9 +413,9 @@ FILES_${PN}-binfmt = "${sysconfdir}/binfmt.d/ \
${rootlibexecdir}/systemd/systemd-binfmt \
${systemd_unitdir}/system/proc-sys-fs-binfmt_misc.* \
${systemd_unitdir}/system/systemd-binfmt.service"
-RRECOMMENDS_${PN}-binfmt = "kernel-module-binfmt-misc"
+RRECOMMENDS_${PN}-binfmt = "${@bb.utils.contains('PACKAGECONFIG', 'binfmt', 'kernel-module-binfmt-misc', '', d)}"
-RRECOMMENDS_${PN}-vconsole-setup = "kbd kbd-consolefonts kbd-keymaps"
+RRECOMMENDS_${PN}-vconsole-setup = "${@bb.utils.contains('PACKAGECONFIG', 'vconsole', 'kbd kbd-consolefonts kbd-keymaps', '', d)}"
FILES_${PN}-journal-gatewayd = "${rootlibexecdir}/systemd/systemd-journal-gatewayd \
@@ -537,6 +548,7 @@ FILES_${PN} = " ${base_bindir}/* \
${sysconfdir}/dbus-1/ \
${sysconfdir}/modules-load.d/ \
${sysconfdir}/pam.d/ \
+ ${sysconfdir}/profile.d/ \
${sysconfdir}/sysctl.d/ \
${sysconfdir}/systemd/ \
${sysconfdir}/tmpfiles.d/ \
diff --git a/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch b/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch
new file mode 100644
index 0000000000..d29e6e0f1f
--- /dev/null
+++ b/meta/recipes-core/zlib/zlib/CVE-2022-37434.patch
@@ -0,0 +1,44 @@
+From 8617d83d6939754ae3a04fc2d22daa18eeea2a43 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 17 Aug 2022 10:15:57 +0530
+Subject: [PATCH] CVE-2022-37434
+
+Upstream-Status: Backport [https://github.com/madler/zlib/commit/eff308af425b67093bab25f80f1ae950166bece1 & https://github.com/madler/zlib/commit/1eb7682f845ac9e9bf9ae35bbfb3bad5dacbd91d]
+CVE: CVE-2022-37434
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+Fix a bug when getting a gzip header extra field with inflate().
+
+If the extra field was larger than the space the user provided with
+inflateGetHeader(), and if multiple calls of inflate() delivered
+the extra header data, then there could be a buffer overflow of the
+provided space. This commit assures that provided space is not
+exceeded.
+
+ Fix extra field processing bug that dereferences NULL state->head.
+
+The recent commit to fix a gzip header extra field processing bug
+introduced the new bug fixed here.
+---
+ inflate.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/inflate.c b/inflate.c
+index ac333e8..cd01857 100644
+--- a/inflate.c
++++ b/inflate.c
+@@ -759,8 +759,9 @@ int flush;
+ if (copy > have) copy = have;
+ if (copy) {
+ if (state->head != Z_NULL &&
+- state->head->extra != Z_NULL) {
+- len = state->head->extra_len - state->length;
++ state->head->extra != Z_NULL &&
++ (len = state->head->extra_len - state->length) <
++ state->head->extra_max) {
+ zmemcpy(state->head->extra + len, next,
+ len + copy > state->head->extra_max ?
+ state->head->extra_max - len : copy);
+--
+2.25.1
+
diff --git a/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch b/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch
new file mode 100644
index 0000000000..654579eb81
--- /dev/null
+++ b/meta/recipes-core/zlib/zlib/CVE-2023-45853.patch
@@ -0,0 +1,40 @@
+From 73331a6a0481067628f065ffe87bb1d8f787d10c Mon Sep 17 00:00:00 2001
+From: Hans Wennborg <hans@chromium.org>
+Date: Fri, 18 Aug 2023 11:05:33 +0200
+Subject: [PATCH] Reject overflows of zip header fields in minizip.
+
+This checks the lengths of the file name, extra field, and comment
+that would be put in the zip headers, and rejects them if they are
+too long. They are each limited to 65535 bytes in length by the zip
+format. This also avoids possible buffer overflows if the provided
+fields are too long.
+
+Upstream-Status: Backport from [https://github.com/madler/zlib/commit/73331a6a0481067628f065ffe87bb1d8f787d10c]
+CVE: CVE-2023-45853
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+---
+ contrib/minizip/zip.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/contrib/minizip/zip.c b/contrib/minizip/zip.c
+index 3d3d4cadd..0446109b2 100644
+--- a/contrib/minizip/zip.c
++++ b/contrib/minizip/zip.c
+@@ -1043,6 +1043,17 @@ extern int ZEXPORT zipOpenNewFileInZip4_64(zipFile file, const char* filename, c
+ return ZIP_PARAMERROR;
+ #endif
+
++ // The filename and comment length must fit in 16 bits.
++ if ((filename!=NULL) && (strlen(filename)>0xffff))
++ return ZIP_PARAMERROR;
++ if ((comment!=NULL) && (strlen(comment)>0xffff))
++ return ZIP_PARAMERROR;
++ // The extra field length must fit in 16 bits. If the member also requires
++ // a Zip64 extra block, that will also need to fit within that 16-bit
++ // length, but that will be checked for later.
++ if ((size_extrafield_local>0xffff) || (size_extrafield_global>0xffff))
++ return ZIP_PARAMERROR;
++
+ zi = (zip64_internal*)file;
+
+ if (zi->in_opened_file_inzip == 1)
diff --git a/meta/recipes-core/zlib/zlib_1.2.11.bb b/meta/recipes-core/zlib/zlib_1.2.11.bb
index bc42cd64e9..9355f0556e 100644
--- a/meta/recipes-core/zlib/zlib_1.2.11.bb
+++ b/meta/recipes-core/zlib/zlib_1.2.11.bb
@@ -10,6 +10,8 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/libpng/${BPN}/${PV}/${BPN}-${PV}.tar.xz \
file://ldflags-tests.patch \
file://CVE-2018-25032.patch \
file://run-ptest \
+ file://CVE-2022-37434.patch \
+ file://CVE-2023-45853.patch \
"
UPSTREAM_CHECK_URI = "http://zlib.net/"
@@ -51,3 +53,6 @@ do_install_append_class-target() {
}
BBCLASSEXTEND = "native nativesdk"
+
+# This CVE applies to the Cloudflare fork of zlib, not to this upstream zlib
+CVE_CHECK_WHITELIST += "CVE-2023-6992"
diff --git a/meta/recipes-devtools/binutils/binutils-2.34.inc b/meta/recipes-devtools/binutils/binutils-2.34.inc
index 6a55de2d45..032263fe63 100644
--- a/meta/recipes-devtools/binutils/binutils-2.34.inc
+++ b/meta/recipes-devtools/binutils/binutils-2.34.inc
@@ -24,7 +24,7 @@ BRANCH ?= "binutils-2_34-branch"
UPSTREAM_CHECK_GITTAGREGEX = "binutils-(?P<pver>\d+_(\d_?)*)"
-SRCREV ?= "d4b50999b3b287b5f984ade2f8734aa8c9359440"
+SRCREV ?= "c4e78c0868a22971680217a41fdb73516a26813d"
BINUTILS_GIT_URI ?= "git://sourceware.org/git/binutils-gdb.git;branch=${BRANCH};protocol=git"
SRC_URI = "\
${BINUTILS_GIT_URI} \
@@ -52,5 +52,15 @@ SRC_URI = "\
file://CVE-2021-3549.patch \
file://CVE-2020-16593.patch \
file://0001-CVE-2021-45078.patch \
+ file://CVE-2022-38533.patch \
+ file://CVE-2023-25588.patch \
+ file://CVE-2021-46174.patch \
+ file://CVE-2023-25584.patch \
+ file://CVE-2022-47007.patch \
+ file://CVE-2022-47008.patch \
+ file://CVE-2022-47010.patch \
+ file://CVE-2022-47011.patch \
+ file://CVE-2022-48063.patch \
+ file://CVE-2022-47695.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch b/meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch
index cbe4a50507..c7c7829261 100644
--- a/meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2020-16593.patch
@@ -199,6 +199,6 @@ Index: git/bfd/ChangeLog
+ * dwarf2.c (scan_unit_for_symbols): Wrap overlong lines. Don't
+ strdup(0).
+
- 2020-02-19 H.J. Lu <hongjiu.lu@intel.com>
+ 2021-05-03 Alan Modra <amodra@gmail.com>
- PR binutils/25355
+ PR 27755
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch b/meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch
index 4391db340a..5f56dd7696 100644
--- a/meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2021-3549.patch
@@ -7,31 +7,49 @@ Adds missing sanity checks for avr device info note, to avoid
potential buffer overflows. Uses bfd_malloc_and_get_section for
sanity checking section size.
- PR 27290
- PR 27293
- PR 27295
- * od-elf32_avr.c (elf32_avr_get_note_section_contents): Formatting.
- Use bfd_malloc_and_get_section.
- (elf32_avr_get_note_desc): Formatting. Return descsz. Sanity
- check namesz. Return NULL if descsz is too small. Ensure
- string table is terminated.
- (elf32_avr_get_device_info): Formatting. Add note_size param.
- Sanity check note.
- (elf32_avr_dump_mem_usage): Adjust to suit.
+ PR 27290
+ PR 27293
+ PR 27295
+ * od-elf32_avr.c (elf32_avr_get_note_section_contents): Formatting.
+ Use bfd_malloc_and_get_section.
+ (elf32_avr_get_note_desc): Formatting. Return descsz. Sanity
+ check namesz. Return NULL if descsz is too small. Ensure
+ string table is terminated.
+ (elf32_avr_get_device_info): Formatting. Add note_size param.
+ Sanity check note.
+ (elf32_avr_dump_mem_usage): Adjust to suit.
Upstream-Status: Backport
CVE: CVE-2021-3549
Signed-of-by: Armin Kuster <akuster@mvista.com>
---
- binutils/ChangeLog | 14 +++++++++
- binutils/od-elf32_avr.c | 66 ++++++++++++++++++++++++++---------------
- 2 files changed, 56 insertions(+), 24 deletions(-)
-
-Index: git/binutils/od-elf32_avr.c
-===================================================================
---- git.orig/binutils/od-elf32_avr.c
-+++ git/binutils/od-elf32_avr.c
+diff --git a/binutils/ChangeLog b/binutils/ChangeLog
+index 1e9a96c9bb6..02e5019204e 100644
+--- a/binutils/ChangeLog
++++ b/binutils/ChangeLog
+@@ -1,3 +1,17 @@
++2021-02-11 Alan Modra <amodra@gmail.com>
++
++ PR 27290
++ PR 27293
++ PR 27295
++ * od-elf32_avr.c (elf32_avr_get_note_section_contents): Formatting.
++ Use bfd_malloc_and_get_section.
++ (elf32_avr_get_note_desc): Formatting. Return descsz. Sanity
++ check namesz. Return NULL if descsz is too small. Ensure
++ string table is terminated.
++ (elf32_avr_get_device_info): Formatting. Add note_size param.
++ Sanity check note.
++ (elf32_avr_dump_mem_usage): Adjust to suit.
++
+ 2020-03-25 H.J. Lu <hongjiu.lu@intel.com>
+
+ * ar.c (main): Update bfd_plugin_set_program_name call.
+diff --git a/binutils/od-elf32_avr.c b/binutils/od-elf32_avr.c
+index 5ec99957fe9..1d32bce918e 100644
+--- a/binutils/od-elf32_avr.c
++++ b/binutils/od-elf32_avr.c
@@ -77,23 +77,29 @@ elf32_avr_filter (bfd *abfd)
return bfd_get_flavour (abfd) == bfd_target_elf_flavour;
}
@@ -70,7 +88,7 @@ Index: git/binutils/od-elf32_avr.c
{
Elf_External_Note *xnp = (Elf_External_Note *) contents;
Elf_Internal_Note in;
-@@ -107,42 +113,54 @@ static char* elf32_avr_get_note_desc (bf
+@@ -107,42 +113,54 @@ static char* elf32_avr_get_note_desc (bfd *abfd, char *contents,
if (in.namesz > contents - in.namedata + size)
return NULL;
@@ -163,25 +181,3 @@ Index: git/binutils/od-elf32_avr.c
}
elf32_avr_get_memory_usage (abfd, &text_usage, &data_usage,
-Index: git/binutils/ChangeLog
-===================================================================
---- git.orig/binutils/ChangeLog
-+++ git/binutils/ChangeLog
-@@ -1,3 +1,17 @@
-+2021-02-11 Alan Modra <amodra@gmail.com>
-+
-+ PR 27290
-+ PR 27293
-+ PR 27295
-+ * od-elf32_avr.c (elf32_avr_get_note_section_contents): Formatting.
-+ Use bfd_malloc_and_get_section.
-+ (elf32_avr_get_note_desc): Formatting. Return descsz. Sanity
-+ check namesz. Return NULL if descsz is too small. Ensure
-+ string table is terminated.
-+ (elf32_avr_get_device_info): Formatting. Add note_size param.
-+ Sanity check note.
-+ (elf32_avr_dump_mem_usage): Adjust to suit.
-+
- 2020-02-01 Nick Clifton <nickc@redhat.com>
-
- * configure: Regenerate.
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2021-46174.patch b/meta/recipes-devtools/binutils/binutils/CVE-2021-46174.patch
new file mode 100644
index 0000000000..2addf5139e
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2021-46174.patch
@@ -0,0 +1,35 @@
+From 46322722ad40ac1a75672ae0f62f4969195f1368 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Thu, 20 Jan 2022 13:58:38 +1030
+Subject: [PATCH] PR28753, buffer overflow in read_section_stabs_debugging_info
+
+ PR 28753
+ * rddbg.c (read_section_stabs_debugging_info): Don't read past
+ end of section when concatentating stab strings.
+
+CVE: CVE-2021-46174
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=cad4d6b91e97]
+
+(cherry picked from commit 085b299b71721e15f5c5c5344dc3e4e4536dadba)
+(cherry picked from commit cad4d6b91e97b6962807d33c04ed7e7797788438)
+Signed-off-by: poojitha adireddy <pooadire@cisco.com>
+---
+ binutils/rddbg.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/binutils/rddbg.c b/binutils/rddbg.c
+index 72e934055b5..5e76d94a3c4 100644
+--- a/binutils/rddbg.c
++++ b/binutils/rddbg.c
+@@ -207,7 +207,7 @@ read_section_stabs_debugging_info (bfd *abfd, asymbol **syms, long symcount,
+ an attempt to read the byte before 'strings' would occur. */
+ while ((len = strlen (s)) > 0
+ && s[len - 1] == '\\'
+- && stab + 12 < stabs + stabsize)
++ && stab + 16 <= stabs + stabsize)
+ {
+ char *p;
+
+--
+2.23.1
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-38533.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-38533.patch
new file mode 100644
index 0000000000..102d65f8a6
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-38533.patch
@@ -0,0 +1,37 @@
+From ef186fe54aa6d281a3ff8a9528417e5cc614c797 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Sat, 13 Aug 2022 15:32:47 +0930
+Subject: [PATCH] PR29482 - strip: heap-buffer-overflow
+
+ PR 29482
+ * coffcode.h (coff_set_section_contents): Sanity check _LIB.
+
+CVE: CVE-2022-38533
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=ef186fe54aa6d281a3ff8a9528417e5cc614c797]
+
+Signed-off-by: Florin Diaconescu <florin.diaconescu009@gmail.com>
+
+---
+ bfd/coffcode.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/bfd/coffcode.h b/bfd/coffcode.h
+index dec2e9c6370..75c18d88602 100644
+--- a/bfd/coffcode.h
++++ b/bfd/coffcode.h
+@@ -4170,10 +4170,13 @@ coff_set_section_contents (bfd * abfd,
+
+ rec = (bfd_byte *) location;
+ recend = rec + count;
+- while (rec < recend)
++ while (recend - rec >= 4)
+ {
++ size_t len = bfd_get_32 (abfd, rec);
++ if (len == 0 || len > (size_t) (recend - rec) / 4)
++ break;
++ rec += len * 4;
+ ++section->lma;
+- rec += bfd_get_32 (abfd, rec) * 4;
+ }
+
+ BFD_ASSERT (rec == recend);
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47007.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47007.patch
new file mode 100644
index 0000000000..ddb564bc8c
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47007.patch
@@ -0,0 +1,32 @@
+From 0ebc886149c22aceaf8ed74267821a59ca9d03eb Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 17 Jun 2022 09:00:41 +0930
+Subject: [PATCH] PR29254, memory leak in stab_demangle_v3_arg
+
+ PR 29254
+ * stabs.c (stab_demangle_v3_arg): Free dt on failure path.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=0ebc886149c22aceaf8ed74267821a59ca9d03eb]
+CVE: CVE-2022-47007
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/stabs.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/binutils/stabs.c b/binutils/stabs.c
+index 2b5241637c1..796ff85b86a 100644
+--- a/binutils/stabs.c
++++ b/binutils/stabs.c
+@@ -5476,7 +5476,10 @@
+ dc->u.s_binary.right,
+ &varargs);
+ if (pargs == NULL)
+- return NULL;
++ {
++ free (dt);
++ return NULL;
++ }
+
+ return debug_make_function_type (dhandle, dt, pargs, varargs);
+ }
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47008.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47008.patch
new file mode 100644
index 0000000000..9527390ccf
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47008.patch
@@ -0,0 +1,64 @@
+From d6e1d48c83b165c129cb0aa78905f7ca80a1f682 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 17 Jun 2022 09:13:38 +0930
+Subject: [PATCH] PR29255, memory leak in make_tempdir
+
+ PR 29255
+ * bucomm.c (make_tempdir, make_tempname): Free template on all
+ failure paths.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=d6e1d48c83b165c129cb0aa78905f7ca80a1f682]
+CVE: CVE-2022-47008
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/bucomm.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/binutils/bucomm.c b/binutils/bucomm.c
+index fdc2209df9c..4395cb9f7f5 100644
+--- a/binutils/bucomm.c
++++ b/binutils/bucomm.c
+@@ -542,8 +542,9 @@
+ #else
+ tmpname = mktemp (tmpname);
+ if (tmpname == NULL)
+- return NULL;
+- fd = open (tmpname, O_RDWR | O_CREAT | O_EXCL, 0600);
++ fd = -1;
++ else
++ fd = open (tmpname, O_RDWR | O_CREAT | O_EXCL, 0600);
+ #endif
+ if (fd == -1)
+ {
+@@ -561,22 +562,23 @@
+ make_tempdir (const char *filename)
+ {
+ char *tmpname = template_in_dir (filename);
++ char *ret;
+
+ #ifdef HAVE_MKDTEMP
+- return mkdtemp (tmpname);
++ ret = mkdtemp (tmpname);
+ #else
+- tmpname = mktemp (tmpname);
+- if (tmpname == NULL)
+- return NULL;
++ ret = mktemp (tmpname);
+ #if defined (_WIN32) && !defined (__CYGWIN32__)
+ if (mkdir (tmpname) != 0)
+- return NULL;
++ ret = NULL;
+ #else
+ if (mkdir (tmpname, 0700) != 0)
+- return NULL;
++ ret = NULL;
+ #endif
+- return tmpname;
+ #endif
++ if (ret == NULL)
++ free (tmpname);
++ return ret;
+ }
+
+ /* Parse a string into a VMA, with a fatal error if it can't be
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47010.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47010.patch
new file mode 100644
index 0000000000..d831ed4756
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47010.patch
@@ -0,0 +1,34 @@
+From 0d02e70b197c786f26175b9a73f94e01d14abdab Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 20 Jun 2022 10:39:31 +0930
+Subject: [PATCH] PR29262, memory leak in pr_function_type
+
+ PR 29262
+ * prdbg.c (pr_function_type): Free "s" on failure path.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=0d02e70b197c786f26175b9a73f94e01d14abdab]
+CVE: CVE-2022-47010
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/prdbg.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/binutils/prdbg.c b/binutils/prdbg.c
+index c1e41628d26..bb42a5b6c2d 100644
+--- a/binutils/prdbg.c
++++ b/binutils/prdbg.c
+@@ -778,12 +778,9 @@
+
+ strcat (s, ")");
+
+- if (! substitute_type (info, s))
+- return FALSE;
+-
++ bfd_boolean ret = substitute_type (info, s);
+ free (s);
+-
+- return TRUE;
++ return ret;
+ }
+
+ /* Turn the top type on the stack into a reference to that type. */
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47011.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47011.patch
new file mode 100644
index 0000000000..250756bd38
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47011.patch
@@ -0,0 +1,31 @@
+From 8a24927bc8dbf6beac2000593b21235c3796dc35 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 20 Jun 2022 10:39:13 +0930
+Subject: [PATCH] PR29261, memory leak in parse_stab_struct_fields
+
+ PR 29261
+ * stabs.c (parse_stab_struct_fields): Free "fields" on failure path.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff_plain;h=8a24927bc8dbf6beac2000593b21235c3796dc35]
+CVE: CVE-2022-47011
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/stabs.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/binutils/stabs.c b/binutils/stabs.c
+index 796ff85b86a..bf3f578cbcc 100644
+--- a/binutils/stabs.c
++++ b/binutils/stabs.c
+@@ -2368,7 +2368,10 @@
+
+ if (! parse_stab_one_struct_field (dhandle, info, pp, p, fields + c,
+ staticsp, p_end))
+- return FALSE;
++ {
++ free (fields);
++ return FALSE;
++ }
+
+ ++c;
+ }
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-47695.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-47695.patch
new file mode 100644
index 0000000000..101a4cdb4e
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-47695.patch
@@ -0,0 +1,57 @@
+From 3d3af4ba39e892b1c544d667ca241846bc3df386 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Sun, 4 Dec 2022 22:15:40 +1030
+Subject: [PATCH] PR29846, segmentation fault in objdump.c compare_symbols
+
+Fixes a fuzzed object file problem where plt relocs were manipulated
+in such a way that two synthetic symbols were generated at the same
+plt location. Won't occur in real object files.
+
+ PR 29846
+ PR 20337
+ * objdump.c (compare_symbols): Test symbol flags to exclude
+ section and synthetic symbols before attempting to check flavour.
+Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=3d3af4ba39e892b1c544d667ca241846bc3df386]
+CVE: CVE-2022-47695
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/objdump.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+diff --git a/binutils/objdump.c b/binutils/objdump.c
+index e8481b2d928..d95c8b68bf0 100644
+--- a/binutils/objdump.c
++++ b/binutils/objdump.c
+@@ -935,20 +935,17 @@
+ return 1;
+ }
+
+- if (bfd_get_flavour (bfd_asymbol_bfd (a)) == bfd_target_elf_flavour
++ /* Sort larger size ELF symbols before smaller. See PR20337. */
++ bfd_vma asz = 0;
++ if ((a->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0
++ && bfd_get_flavour (bfd_asymbol_bfd (a)) == bfd_target_elf_flavour)
++ asz = ((elf_symbol_type *) a)->internal_elf_sym.st_size;
++ bfd_vma bsz = 0;
++ if ((b->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0
+ && bfd_get_flavour (bfd_asymbol_bfd (b)) == bfd_target_elf_flavour)
+- {
+- bfd_vma asz, bsz;
+-
+- asz = 0;
+- if ((a->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
+- asz = ((elf_symbol_type *) a)->internal_elf_sym.st_size;
+- bsz = 0;
+- if ((b->flags & (BSF_SECTION_SYM | BSF_SYNTHETIC)) == 0)
+- bsz = ((elf_symbol_type *) b)->internal_elf_sym.st_size;
+- if (asz != bsz)
+- return asz > bsz ? -1 : 1;
+- }
++ bsz = ((elf_symbol_type *) b)->internal_elf_sym.st_size;
++ if (asz != bsz)
++ return asz > bsz ? -1 : 1;
+
+ /* Symbols that start with '.' might be section names, so sort them
+ after symbols that don't start with '.'. */
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch b/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch
new file mode 100644
index 0000000000..f41c02a02b
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2022-48063.patch
@@ -0,0 +1,49 @@
+From 75393a2d54bcc40053e5262a3de9d70c5ebfbbfd Mon Sep 17 00:00:00 2001
+From: Nick Clifton <nickc@redhat.com>
+Date: Wed, 21 Dec 2022 11:51:23 +0000
+Subject: [PATCH] Fix an attempt to allocate an unreasonably large amount of
+ memory when parsing a corrupt ELF file.
+
+ PR 29924
+ * objdump.c (load_specific_debug_section): Check for excessively
+ large sections.
+Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=75393a2d54bcc40053e5262a3de9d70c5ebfbbfd]
+CVE: CVE-2022-48063
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+Comment: Patch refreshed based on codebase.
+---
+ binutils/ChangeLog | 6 ++++++
+ binutils/objdump.c | 4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/binutils/ChangeLog b/binutils/ChangeLog
+index e7f918d3f65..020e09f3700 100644
+--- a/binutils/ChangeLog
++++ b/binutils/ChangeLog
+@@ -1,3 +1,9 @@
++2022-12-21 Nick Clifton <nickc@redhat.com>
++
++ PR 29924
++ * objdump.c (load_specific_debug_section): Check for excessively
++ large sections.
++
+ 2021-02-11 Alan Modra <amodra@gmail.com>
+
+ PR 27290
+
+diff --git a/binutils/objdump.c b/binutils/objdump.c
+index d51abbe3858..2eb02de0e76 100644
+--- a/binutils/objdump.c
++++ b/binutils/objdump.c
+@@ -3479,7 +3479,9 @@
+ section->size = bfd_section_size (sec);
+ /* PR 24360: On 32-bit hosts sizeof (size_t) < sizeof (bfd_size_type). */
+ alloced = amt = section->size + 1;
+- if (alloced != amt || alloced == 0)
++ if (alloced != amt
++ || alloced == 0
++ || (bfd_get_size (abfd) != 0 && alloced >= bfd_get_size (abfd)))
+ {
+ section->start = NULL;
+ free_debug_section (debug);
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2023-25584.patch b/meta/recipes-devtools/binutils/binutils/CVE-2023-25584.patch
new file mode 100644
index 0000000000..732ea43210
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2023-25584.patch
@@ -0,0 +1,530 @@
+CVE: CVE-2023-25584
+Upstream-Status: Backport [ import from ubuntu http://archive.ubuntu.com/ubuntu/pool/main/b/binutils/binutils_2.34-6ubuntu1.7.debian.tar.xz upstream https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=77c225bdeb410cf60da804879ad41622f5f1aa44 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+[Ubuntu note: this is backport of the original patch, no major changes just
+ fix this patch for this release]
+From 77c225bdeb410cf60da804879ad41622f5f1aa44 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Mon, 12 Dec 2022 18:28:49 +1030
+Subject: [PATCH] Lack of bounds checking in vms-alpha.c parse_module
+
+ PR 29873
+ PR 29874
+ PR 29875
+ PR 29876
+ PR 29877
+ PR 29878
+ PR 29879
+ PR 29880
+ PR 29881
+ PR 29882
+ PR 29883
+ PR 29884
+ PR 29885
+ PR 29886
+ PR 29887
+ PR 29888
+ PR 29889
+ PR 29890
+ PR 29891
+ * vms-alpha.c (parse_module): Make length param bfd_size_type.
+ Delete length == -1 checks. Sanity check record_length.
+ Sanity check DST__K_MODBEG, DST__K_RTNBEG, DST__K_RTNEND lengths.
+ Sanity check DST__K_SOURCE and DST__K_LINE_NUM elements
+ before accessing.
+ (build_module_list): Pass dst_section size to parse_module.
+---
+ bfd/vms-alpha.c | 213 ++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 168 insertions(+), 45 deletions(-)
+
+--- binutils-2.34.orig/bfd/vms-alpha.c
++++ binutils-2.34/bfd/vms-alpha.c
+@@ -4267,7 +4267,7 @@ new_module (bfd *abfd)
+
+ static void
+ parse_module (bfd *abfd, struct module *module, unsigned char *ptr,
+- int length)
++ bfd_size_type length)
+ {
+ unsigned char *maxptr = ptr + length;
+ unsigned char *src_ptr, *pcl_ptr;
+@@ -4284,7 +4284,7 @@ parse_module (bfd *abfd, struct module *
+ curr_line = (struct lineinfo *) bfd_zalloc (abfd, sizeof (struct lineinfo));
+ module->line_table = curr_line;
+
+- while (length == -1 || ptr < maxptr)
++ while (ptr < maxptr)
+ {
+ /* The first byte is not counted in the recorded length. */
+ int rec_length = bfd_getl16 (ptr) + 1;
+@@ -4292,15 +4292,19 @@ parse_module (bfd *abfd, struct module *
+
+ vms_debug2 ((2, "DST record: leng %d, type %d\n", rec_length, rec_type));
+
+- if (length == -1 && rec_type == DST__K_MODEND)
++ if (rec_length > maxptr - ptr)
++ break;
++ if (rec_type == DST__K_MODEND)
+ break;
+
+ switch (rec_type)
+ {
+ case DST__K_MODBEG:
++ if (rec_length <= DST_S_B_MODBEG_NAME)
++ break;
+ module->name
+ = _bfd_vms_save_counted_string (abfd, ptr + DST_S_B_MODBEG_NAME,
+- maxptr - (ptr + DST_S_B_MODBEG_NAME));
++ rec_length - DST_S_B_MODBEG_NAME);
+
+ curr_pc = 0;
+ prev_pc = 0;
+@@ -4314,11 +4318,13 @@ parse_module (bfd *abfd, struct module *
+ break;
+
+ case DST__K_RTNBEG:
++ if (rec_length <= DST_S_B_RTNBEG_NAME)
++ break;
+ funcinfo = (struct funcinfo *)
+ bfd_zalloc (abfd, sizeof (struct funcinfo));
+ funcinfo->name
+ = _bfd_vms_save_counted_string (abfd, ptr + DST_S_B_RTNBEG_NAME,
+- maxptr - (ptr + DST_S_B_RTNBEG_NAME));
++ rec_length - DST_S_B_RTNBEG_NAME);
+ funcinfo->low = bfd_getl32 (ptr + DST_S_L_RTNBEG_ADDRESS);
+ funcinfo->next = module->func_table;
+ module->func_table = funcinfo;
+@@ -4328,6 +4334,8 @@ parse_module (bfd *abfd, struct module *
+ break;
+
+ case DST__K_RTNEND:
++ if (rec_length < DST_S_L_RTNEND_SIZE + 4)
++ break;
+ module->func_table->high = module->func_table->low
+ + bfd_getl32 (ptr + DST_S_L_RTNEND_SIZE) - 1;
+
+@@ -4358,13 +4366,66 @@ parse_module (bfd *abfd, struct module *
+
+ vms_debug2 ((3, "source info\n"));
+
+- while (src_ptr < ptr + rec_length)
++ while (src_ptr - ptr < rec_length)
+ {
+ int cmd = src_ptr[0], cmd_length, data;
+
+ switch (cmd)
+ {
+ case DST__K_SRC_DECLFILE:
++ if (src_ptr - ptr + DST_S_B_SRC_DF_LENGTH >= rec_length)
++ cmd_length = 0x10000;
++ else
++ cmd_length = src_ptr[DST_S_B_SRC_DF_LENGTH] + 2;
++ break;
++
++ case DST__K_SRC_DEFLINES_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SRC_DEFLINES_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_INCRLNUM_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SRC_SETFILE:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_SETLNUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SRC_SETLNUM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_SETREC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SRC_SETREC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SRC_FORMFEED:
++ cmd_length = 1;
++ break;
++
++ default:
++ cmd_length = 2;
++ break;
++ }
++
++ if (src_ptr - ptr + cmd_length > rec_length)
++ break;
++
++ switch (cmd)
++ {
++ case DST__K_SRC_DECLFILE:
+ {
+ unsigned int fileid
+ = bfd_getl16 (src_ptr + DST_S_W_SRC_DF_FILEID);
+@@ -4384,7 +4445,6 @@ parse_module (bfd *abfd, struct module *
+
+ module->file_table [fileid].name = filename;
+ module->file_table [fileid].srec = 1;
+- cmd_length = src_ptr[DST_S_B_SRC_DF_LENGTH] + 2;
+ vms_debug2 ((4, "DST_S_C_SRC_DECLFILE: %d, %s\n",
+ fileid, module->file_table [fileid].name));
+ }
+@@ -4401,7 +4461,6 @@ parse_module (bfd *abfd, struct module *
+ srec->sfile = curr_srec->sfile;
+ curr_srec->next = srec;
+ curr_srec = srec;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST_S_C_SRC_DEFLINES_B: %d\n", data));
+ break;
+
+@@ -4416,14 +4475,12 @@ parse_module (bfd *abfd, struct module *
+ srec->sfile = curr_srec->sfile;
+ curr_srec->next = srec;
+ curr_srec = srec;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_DEFLINES_W: %d\n", data));
+ break;
+
+ case DST__K_SRC_INCRLNUM_B:
+ data = src_ptr[DST_S_B_SRC_UNSBYTE];
+ curr_srec->line += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST_S_C_SRC_INCRLNUM_B: %d\n", data));
+ break;
+
+@@ -4431,21 +4488,18 @@ parse_module (bfd *abfd, struct module *
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->sfile = data;
+ curr_srec->srec = module->file_table[data].srec;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETFILE: %d\n", data));
+ break;
+
+ case DST__K_SRC_SETLNUM_L:
+ data = bfd_getl32 (src_ptr + DST_S_L_SRC_UNSLONG);
+ curr_srec->line = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST_S_C_SRC_SETLNUM_L: %d\n", data));
+ break;
+
+ case DST__K_SRC_SETLNUM_W:
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->line = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETLNUM_W: %d\n", data));
+ break;
+
+@@ -4453,7 +4507,6 @@ parse_module (bfd *abfd, struct module *
+ data = bfd_getl32 (src_ptr + DST_S_L_SRC_UNSLONG);
+ curr_srec->srec = data;
+ module->file_table[curr_srec->sfile].srec = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST_S_C_SRC_SETREC_L: %d\n", data));
+ break;
+
+@@ -4461,19 +4514,16 @@ parse_module (bfd *abfd, struct module *
+ data = bfd_getl16 (src_ptr + DST_S_W_SRC_UNSWORD);
+ curr_srec->srec = data;
+ module->file_table[curr_srec->sfile].srec = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST_S_C_SRC_SETREC_W: %d\n", data));
+ break;
+
+ case DST__K_SRC_FORMFEED:
+- cmd_length = 1;
+ vms_debug2 ((4, "DST_S_C_SRC_FORMFEED\n"));
+ break;
+
+ default:
+ _bfd_error_handler (_("unknown source command %d"),
+ cmd);
+- cmd_length = 2;
+ break;
+ }
+
+@@ -4486,7 +4536,7 @@ parse_module (bfd *abfd, struct module *
+
+ vms_debug2 ((3, "line info\n"));
+
+- while (pcl_ptr < ptr + rec_length)
++ while (pcl_ptr - ptr < rec_length)
+ {
+ /* The command byte is signed so we must sign-extend it. */
+ int cmd = ((signed char *)pcl_ptr)[0], cmd_length, data;
+@@ -4494,10 +4544,106 @@ parse_module (bfd *abfd, struct module *
+ switch (cmd)
+ {
+ case DST__K_DELTA_PC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_DELTA_PC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_INCR_LINUM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_INCR_LINUM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_INCR_LINUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_LINUM_INCR:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_LINUM_INCR_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_RESET_LINUM_INCR:
++ cmd_length = 1;
++ break;
++
++ case DST__K_BEG_STMT_MODE:
++ cmd_length = 1;
++ break;
++
++ case DST__K_END_STMT_MODE:
++ cmd_length = 1;
++ break;
++
++ case DST__K_SET_LINUM_B:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_LINUM:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SET_LINUM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_PC:
++ cmd_length = 2;
++ break;
++
++ case DST__K_SET_PC_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_SET_PC_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_STMTNUM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_TERM:
++ cmd_length = 2;
++ break;
++
++ case DST__K_TERM_W:
++ cmd_length = 3;
++ break;
++
++ case DST__K_TERM_L:
++ cmd_length = 5;
++ break;
++
++ case DST__K_SET_ABS_PC:
++ cmd_length = 5;
++ break;
++
++ default:
++ if (cmd <= 0)
++ cmd_length = 1;
++ else
++ cmd_length = 2;
++ break;
++ }
++
++ if (pcl_ptr - ptr + cmd_length > rec_length)
++ break;
++
++ switch (cmd)
++ {
++ case DST__K_DELTA_PC_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_pc += data;
+ curr_linenum += 1;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_DELTA_PC_W: %d\n", data));
+ break;
+
+@@ -4505,131 +4651,111 @@ parse_module (bfd *abfd, struct module *
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc += data;
+ curr_linenum += 1;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_DELTA_PC_L: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_linenum += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_INCR_LINUM: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_linenum += data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_INCR_LINUM_W: %d\n", data));
+ break;
+
+ case DST__K_INCR_LINUM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_linenum += data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_INCR_LINUM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM_INCR:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_LINUM_INCR");
+- cmd_length = 2;
+ break;
+
+ case DST__K_SET_LINUM_INCR_W:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_LINUM_INCR_W");
+- cmd_length = 3;
+ break;
+
+ case DST__K_RESET_LINUM_INCR:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_RESET_LINUM_INCR");
+- cmd_length = 1;
+ break;
+
+ case DST__K_BEG_STMT_MODE:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_BEG_STMT_MODE");
+- cmd_length = 1;
+ break;
+
+ case DST__K_END_STMT_MODE:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_END_STMT_MODE");
+- cmd_length = 1;
+ break;
+
+ case DST__K_SET_LINUM_B:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_linenum = data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_SET_LINUM_B: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_linenum = data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_SET_LINE_NUM: %d\n", data));
+ break;
+
+ case DST__K_SET_LINUM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_linenum = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_SET_LINUM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_PC:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC");
+- cmd_length = 2;
+ break;
+
+ case DST__K_SET_PC_W:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC_W");
+- cmd_length = 3;
+ break;
+
+ case DST__K_SET_PC_L:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_PC_L");
+- cmd_length = 5;
+ break;
+
+ case DST__K_SET_STMTNUM:
+ _bfd_error_handler
+ (_("%s not implemented"), "DST__K_SET_STMTNUM");
+- cmd_length = 2;
+ break;
+
+ case DST__K_TERM:
+ data = pcl_ptr[DST_S_B_PCLINE_UNSBYTE];
+ curr_pc += data;
+- cmd_length = 2;
+ vms_debug2 ((4, "DST__K_TERM: %d\n", data));
+ break;
+
+ case DST__K_TERM_W:
+ data = bfd_getl16 (pcl_ptr + DST_S_W_PCLINE_UNSWORD);
+ curr_pc += data;
+- cmd_length = 3;
+ vms_debug2 ((4, "DST__K_TERM_W: %d\n", data));
+ break;
+
+ case DST__K_TERM_L:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc += data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_TERM_L: %d\n", data));
+ break;
+
+ case DST__K_SET_ABS_PC:
+ data = bfd_getl32 (pcl_ptr + DST_S_L_PCLINE_UNSLONG);
+ curr_pc = data;
+- cmd_length = 5;
+ vms_debug2 ((4, "DST__K_SET_ABS_PC: 0x%x\n", data));
+ break;
+
+@@ -4638,15 +4764,11 @@ parse_module (bfd *abfd, struct module *
+ {
+ curr_pc -= cmd;
+ curr_linenum += 1;
+- cmd_length = 1;
+ vms_debug2 ((4, "bump pc to 0x%lx and line to %d\n",
+ (unsigned long)curr_pc, curr_linenum));
+ }
+ else
+- {
+- _bfd_error_handler (_("unknown line command %d"), cmd);
+- cmd_length = 2;
+- }
++ _bfd_error_handler (_("unknown line command %d"), cmd);
+ break;
+ }
+
+@@ -4778,7 +4900,7 @@ build_module_list (bfd *abfd)
+ return NULL;
+
+ module = new_module (abfd);
+- parse_module (abfd, module, PRIV (dst_section)->contents, -1);
++ parse_module (abfd, module, PRIV (dst_section)->contents, PRIV (dst_section)->size);
+ list = module;
+ }
+
diff --git a/meta/recipes-devtools/binutils/binutils/CVE-2023-25588.patch b/meta/recipes-devtools/binutils/binutils/CVE-2023-25588.patch
new file mode 100644
index 0000000000..aa5ce5f3ff
--- /dev/null
+++ b/meta/recipes-devtools/binutils/binutils/CVE-2023-25588.patch
@@ -0,0 +1,149 @@
+From d12f8998d2d086f0a6606589e5aedb7147e6f2f1 Mon Sep 17 00:00:00 2001
+From: Alan Modra <amodra@gmail.com>
+Date: Fri, 14 Oct 2022 10:30:21 +1030
+Subject: [PATCH] PR29677, Field `the_bfd` of `asymbol` is uninitialised
+
+Besides not initialising the_bfd of synthetic symbols, counting
+symbols when sizing didn't match symbols created if there were any
+dynsyms named "". We don't want synthetic symbols without names
+anyway, so get rid of them. Also, simplify and correct sanity checks.
+
+ PR 29677
+ * mach-o.c (bfd_mach_o_get_synthetic_symtab): Rewrite.
+---
+Upstream-Status: Backport from [https://sourceware.org/git/?p=binutils-gdb.git;a=patch;h=d12f8998d2d086f0a6606589e5aedb7147e6f2f1]
+CVE: CVE-2023-25588
+CVE: CVE-2022-47696
+
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+Signed-off-by: poojitha adireddy <pooadire@cisco.com>
+
+ bfd/mach-o.c | 72 ++++++++++++++++++++++------------------------------
+ 1 file changed, 31 insertions(+), 41 deletions(-)
+
+diff --git a/bfd/mach-o.c b/bfd/mach-o.c
+index acb35e7f0c6..5279343768c 100644
+--- a/bfd/mach-o.c
++++ b/bfd/mach-o.c
+@@ -938,11 +938,9 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ bfd_mach_o_symtab_command *symtab = mdata->symtab;
+ asymbol *s;
+ char * s_start;
+- char * s_end;
+ unsigned long count, i, j, n;
+ size_t size;
+ char *names;
+- char *nul_name;
+ const char stub [] = "$stub";
+
+ *ret = NULL;
+@@ -955,27 +953,27 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ /* We need to allocate a bfd symbol for every indirect symbol and to
+ allocate the memory for its name. */
+ count = dysymtab->nindirectsyms;
+- size = count * sizeof (asymbol) + 1;
+-
++ size = 0;
+ for (j = 0; j < count; j++)
+ {
+- const char * strng;
+ unsigned int isym = dysymtab->indirect_syms[j];
++ const char *str;
+
+ /* Some indirect symbols are anonymous. */
+- if (isym < symtab->nsyms && (strng = symtab->symbols[isym].symbol.name))
+- /* PR 17512: file: f5b8eeba. */
+- size += strnlen (strng, symtab->strsize - (strng - symtab->strtab)) + sizeof (stub);
++ if (isym < symtab->nsyms
++ && (str = symtab->symbols[isym].symbol.name) != NULL)
++ {
++ /* PR 17512: file: f5b8eeba. */
++ size += strnlen (str, symtab->strsize - (str - symtab->strtab));
++ size += sizeof (stub);
++ }
+ }
+
+- s_start = bfd_malloc (size);
++ s_start = bfd_malloc (size + count * sizeof (asymbol));
+ s = *ret = (asymbol *) s_start;
+ if (s == NULL)
+ return -1;
+ names = (char *) (s + count);
+- nul_name = names;
+- *names++ = 0;
+- s_end = s_start + size;
+
+ n = 0;
+ for (i = 0; i < mdata->nsects; i++)
+@@ -997,47 +995,39 @@ bfd_mach_o_get_synthetic_symtab (bfd *abfd,
+ entry_size = bfd_mach_o_section_get_entry_size (abfd, sec);
+
+ /* PR 17512: file: 08e15eec. */
+- if (first >= count || last >= count || first > last)
++ if (first >= count || last > count || first > last)
+ goto fail;
+
+ for (j = first; j < last; j++)
+ {
+ unsigned int isym = dysymtab->indirect_syms[j];
+-
+- /* PR 17512: file: 04d64d9b. */
+- if (((char *) s) + sizeof (* s) > s_end)
+- goto fail;
+-
+- s->flags = BSF_GLOBAL | BSF_SYNTHETIC;
+- s->section = sec->bfdsection;
+- s->value = addr - sec->addr;
+- s->udata.p = NULL;
++ const char *str;
++ size_t len;
+
+ if (isym < symtab->nsyms
+- && symtab->symbols[isym].symbol.name)
++ && (str = symtab->symbols[isym].symbol.name) != NULL)
+ {
+- const char *sym = symtab->symbols[isym].symbol.name;
+- size_t len;
+-
+- s->name = names;
+- len = strlen (sym);
+- /* PR 17512: file: 47dfd4d2. */
+- if (names + len >= s_end)
++ /* PR 17512: file: 04d64d9b. */
++ if (n >= count)
+ goto fail;
+- memcpy (names, sym, len);
+- names += len;
+- /* PR 17512: file: 18f340a4. */
+- if (names + sizeof (stub) >= s_end)
++ len = strnlen (str, symtab->strsize - (str - symtab->strtab));
++ /* PR 17512: file: 47dfd4d2, 18f340a4. */
++ if (size < len + sizeof (stub))
+ goto fail;
+- memcpy (names, stub, sizeof (stub));
+- names += sizeof (stub);
++ memcpy (names, str, len);
++ memcpy (names + len, stub, sizeof (stub));
++ s->name = names;
++ names += len + sizeof (stub);
++ size -= len + sizeof (stub);
++ s->the_bfd = symtab->symbols[isym].symbol.the_bfd;
++ s->flags = BSF_GLOBAL | BSF_SYNTHETIC;
++ s->section = sec->bfdsection;
++ s->value = addr - sec->addr;
++ s->udata.p = NULL;
++ s++;
++ n++;
+ }
+- else
+- s->name = nul_name;
+-
+ addr += entry_size;
+- s++;
+- n++;
+ }
+ break;
+ default:
+--
+2.39.3
+
diff --git a/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake b/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
index f8af79ddd5..870009c2ba 100644
--- a/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
+++ b/meta/recipes-devtools/cmake/cmake/OEToolchainConfig.cmake
@@ -12,13 +12,13 @@ set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
set(CMAKE_FIND_LIBRARY_CUSTOM_LIB_SUFFIX "$ENV{OE_CMAKE_FIND_LIBRARY_CUSTOM_LIB_SUFFIX}")
-# Set CMAKE_SYSTEM_PROCESSOR from the sysroot name (assuming processor-distro-os).
-if ($ENV{SDKTARGETSYSROOT} MATCHES "/sysroots/([a-zA-Z0-9_-]+)-.+-.+")
- set(CMAKE_SYSTEM_PROCESSOR ${CMAKE_MATCH_1})
-endif()
+set( CMAKE_SYSTEM_PROCESSOR $ENV{OECORE_TARGET_ARCH} )
# Include the toolchain configuration subscripts
file( GLOB toolchain_config_files "${CMAKE_TOOLCHAIN_FILE}.d/*.cmake" )
foreach(config ${toolchain_config_files})
include(${config})
endforeach()
+
+unset(CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES)
+unset(CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES)
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p1.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p1.patch
new file mode 100644
index 0000000000..f1d449acbe
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p1.patch
@@ -0,0 +1,236 @@
+From 24def311c6168d0dfb7c5f0f183b72b709c49265 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 20 Feb 2023 14:53:21 +0100
+Subject: [PATCH] dmidecode: Split table fetching from decoding
+
+Clean up function dmi_table so that it does only one thing:
+* dmi_table() is renamed to dmi_table_get(). It now retrieves the
+ DMI table, but does not process it any longer.
+* Decoding or dumping the table is now done in smbios3_decode(),
+ smbios_decode() and legacy_decode().
+No functional change.
+
+A side effect of this change is that writing the header and body of
+dump files is now done in a single location. This is required to
+further consolidate the writing of dump files.
+
+CVE-ID: CVE-2023-30630
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=39b2dd7b6ab7]
+
+Backport Changes:
+- In the file dmidecode.c, the commit [dd593d2] in v3.3 introduces
+ pr_info(). This is backported to printf() as per v3.2.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+(cherry picked from commit 39b2dd7b6ab719b920e96ed832cfb4bdd664e808)
+Signed-off-by: Dhairya Nagodra <dnagodra@cisco.com>
+---
+ dmidecode.c | 86 ++++++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 62 insertions(+), 24 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index a3e9d6c..d6eedd1 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -5211,8 +5211,9 @@ static void dmi_table_decode(u8 *buf, u32 len, u16 num, u16 ver, u32 flags)
+ }
+ }
+
+-static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+- u32 flags)
++/* Allocates a buffer for the table, must be freed by the caller */
++static u8 *dmi_table_get(off_t base, u32 *len, u16 num, u32 ver,
++ const char *devmem, u32 flags)
+ {
+ u8 *buf;
+
+@@ -5231,7 +5232,7 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ {
+ if (num)
+ printf("%u structures occupying %u bytes.\n",
+- num, len);
++ num, *len);
+ if (!(opt.flags & FLAG_FROM_DUMP))
+ printf("Table at 0x%08llX.\n",
+ (unsigned long long)base);
+@@ -5249,19 +5250,19 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ * would be the result of the kernel truncating the table on
+ * parse error.
+ */
+- size_t size = len;
++ size_t size = *len;
+ buf = read_file(flags & FLAG_NO_FILE_OFFSET ? 0 : base,
+ &size, devmem);
+- if (!(opt.flags & FLAG_QUIET) && num && size != (size_t)len)
++ if (!(opt.flags & FLAG_QUIET) && num && size != (size_t)*len)
+ {
+ fprintf(stderr, "Wrong DMI structures length: %u bytes "
+ "announced, only %lu bytes available.\n",
+- len, (unsigned long)size);
++ *len, (unsigned long)size);
+ }
+- len = size;
++ *len = size;
+ }
+ else
+- buf = mem_chunk(base, len, devmem);
++ buf = mem_chunk(base, *len, devmem);
+
+ if (buf == NULL)
+ {
+@@ -5271,15 +5272,9 @@ static void dmi_table(off_t base, u32 len, u16 num, u32 ver, const char *devmem,
+ fprintf(stderr,
+ "Try compiling dmidecode with -DUSE_MMAP.\n");
+ #endif
+- return;
+ }
+
+- if (opt.flags & FLAG_DUMP_BIN)
+- dmi_table_dump(buf, len);
+- else
+- dmi_table_decode(buf, len, num, ver >> 8, flags);
+-
+- free(buf);
++ return buf;
+ }
+
+
+@@ -5314,8 +5309,9 @@ static void overwrite_smbios3_address(u8 *buf)
+
+ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ {
+- u32 ver;
++ u32 ver, len;
+ u64 offset;
++ u8 *table;
+
+ /* Don't let checksum run beyond the buffer */
+ if (buf[0x06] > 0x20)
+@@ -5341,8 +5337,12 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ return 0;
+ }
+
+- dmi_table(((off_t)offset.h << 32) | offset.l,
+- DWORD(buf + 0x0C), 0, ver, devmem, flags | FLAG_STOP_AT_EOT);
++ /* Maximum length, may get trimmed */
++ len = DWORD(buf + 0x0C);
++ table = dmi_table_get(((off_t)offset.h << 32) | offset.l, &len, 0, ver,
++ devmem, flags | FLAG_STOP_AT_EOT);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5351,18 +5351,28 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_smbios3_address(crafted);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ printf("# Writing %d bytes to %s.\n", crafted[0x06],
+ opt.dumpfile);
+ write_dump(0, crafted[0x06], crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, 0, ver >> 8,
++ flags | FLAG_STOP_AT_EOT);
++ }
++
++ free(table);
+
+ return 1;
+ }
+
+ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ {
+- u16 ver;
++ u16 ver, num;
++ u32 len;
++ u8 *table;
+
+ /* Don't let checksum run beyond the buffer */
+ if (buf[0x05] > 0x20)
+@@ -5402,8 +5412,13 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ printf("SMBIOS %u.%u present.\n",
+ ver >> 8, ver & 0xFF);
+
+- dmi_table(DWORD(buf + 0x18), WORD(buf + 0x16), WORD(buf + 0x1C),
+- ver << 8, devmem, flags);
++ /* Maximum length, may get trimmed */
++ len = WORD(buf + 0x16);
++ num = WORD(buf + 0x1C);
++ table = dmi_table_get(DWORD(buf + 0x18), &len, num, ver << 8,
++ devmem, flags);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5412,27 +5427,43 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_dmi_address(crafted + 0x10);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ printf("# Writing %d bytes to %s.\n", crafted[0x05],
+ opt.dumpfile);
+ write_dump(0, crafted[0x05], crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, num, ver, flags);
++ }
++
++ free(table);
+
+ return 1;
+ }
+
+ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ {
++ u16 ver, num;
++ u32 len;
++ u8 *table;
++
+ if (!checksum(buf, 0x0F))
+ return 0;
+
++ ver = ((buf[0x0E] & 0xF0) << 4) + (buf[0x0E] & 0x0F);
+ if (!(opt.flags & FLAG_QUIET))
+ printf("Legacy DMI %u.%u present.\n",
+ buf[0x0E] >> 4, buf[0x0E] & 0x0F);
+
+- dmi_table(DWORD(buf + 0x08), WORD(buf + 0x06), WORD(buf + 0x0C),
+- ((buf[0x0E] & 0xF0) << 12) + ((buf[0x0E] & 0x0F) << 8),
+- devmem, flags);
++ /* Maximum length, may get trimmed */
++ len = WORD(buf + 0x06);
++ num = WORD(buf + 0x0C);
++ table = dmi_table_get(DWORD(buf + 0x08), &len, num, ver << 8,
++ devmem, flags);
++ if (table == NULL)
++ return 1;
+
+ if (opt.flags & FLAG_DUMP_BIN)
+ {
+@@ -5441,11 +5472,18 @@ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 16);
+ overwrite_dmi_address(crafted);
+
++ dmi_table_dump(table, len);
+ if (!(opt.flags & FLAG_QUIET))
+ printf("# Writing %d bytes to %s.\n", 0x0F,
+ opt.dumpfile);
+ write_dump(0, 0x0F, crafted, opt.dumpfile, 1);
+ }
++ else
++ {
++ dmi_table_decode(table, len, num, ver, flags);
++ }
++
++ free(table);
+
+ return 1;
+ }
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p2.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p2.patch
new file mode 100644
index 0000000000..353c2553f5
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630-dependent_p2.patch
@@ -0,0 +1,198 @@
+From 58e8a07b1aef0e53af1642b30248255e53e42790 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 20 Feb 2023 14:53:25 +0100
+Subject: [PATCH] dmidecode: Write the whole dump file at once
+
+When option --dump-bin is used, write the whole dump file at once,
+instead of opening and closing the file separately for the table
+and then for the entry point.
+
+As the file writing function is no longer generic, it gets moved
+from util.c to dmidecode.c.
+
+One minor functional change resulting from the new implementation is
+that the entry point is written first now, so the messages printed
+are swapped.
+
+CVE: CVE-2023-30630
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=d8cfbc808f38]
+
+Backport Changes:
+- In the file dmidecode.c, the commit [2241f1d] in v3.3 introduces
+ pr_info(). This is backported to printf() as per v3.2.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+(cherry picked from commit d8cfbc808f387e87091c25e7d5b8c2bb348bb206)
+Signed-off-by: Dhairya Nagodra <dnagodra@cisco.com>
+
+---
+ dmidecode.c | 69 +++++++++++++++++++++++++++++++++++++++--------------
+ util.c | 40 -------------------------------
+ util.h | 1 -
+ 3 files changed, 51 insertions(+), 59 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index d6eedd1..b91e53b 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -5094,11 +5094,56 @@ static void dmi_table_string(const struct dmi_header *h, const u8 *data, u16 ver
+ }
+ }
+
+-static void dmi_table_dump(const u8 *buf, u32 len)
++static int dmi_table_dump(const u8 *ep, u32 ep_len, const u8 *table,
++ u32 table_len)
+ {
++ FILE *f;
++
++ f = fopen(opt.dumpfile, "wb");
++ if (!f)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fopen");
++ return -1;
++ }
++
++ if (!(opt.flags & FLAG_QUIET))
++ printf("# Writing %d bytes to %s.\n", ep_len, opt.dumpfile);
++ if (fwrite(ep, ep_len, 1, f) != 1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fwrite");
++ goto err_close;
++ }
++
++ if (fseek(f, 32, SEEK_SET) != 0)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fseek");
++ goto err_close;
++ }
++
+ if (!(opt.flags & FLAG_QUIET))
+- printf("# Writing %d bytes to %s.\n", len, opt.dumpfile);
+- write_dump(32, len, buf, opt.dumpfile, 0);
++ printf("# Writing %d bytes to %s.\n", table_len, opt.dumpfile);
++ if (fwrite(table, table_len, 1, f) != 1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fwrite");
++ goto err_close;
++ }
++
++ if (fclose(f))
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("fclose");
++ return -1;
++ }
++
++ return 0;
++
++err_close:
++ fclose(f);
++ return -1;
+ }
+
+ static void dmi_table_decode(u8 *buf, u32 len, u16 num, u16 ver, u32 flags)
+@@ -5351,11 +5396,7 @@ static int smbios3_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_smbios3_address(crafted);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- printf("# Writing %d bytes to %s.\n", crafted[0x06],
+- opt.dumpfile);
+- write_dump(0, crafted[0x06], crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, crafted[0x06], table, len);
+ }
+ else
+ {
+@@ -5427,11 +5468,7 @@ static int smbios_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 32);
+ overwrite_dmi_address(crafted + 0x10);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- printf("# Writing %d bytes to %s.\n", crafted[0x05],
+- opt.dumpfile);
+- write_dump(0, crafted[0x05], crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, crafted[0x05], table, len);
+ }
+ else
+ {
+@@ -5472,11 +5509,7 @@ static int legacy_decode(u8 *buf, const char *devmem, u32 flags)
+ memcpy(crafted, buf, 16);
+ overwrite_dmi_address(crafted);
+
+- dmi_table_dump(table, len);
+- if (!(opt.flags & FLAG_QUIET))
+- printf("# Writing %d bytes to %s.\n", 0x0F,
+- opt.dumpfile);
+- write_dump(0, 0x0F, crafted, opt.dumpfile, 1);
++ dmi_table_dump(crafted, 0x0F, table, len);
+ }
+ else
+ {
+diff --git a/util.c b/util.c
+index eeffdae..2e1931c 100644
+--- a/util.c
++++ b/util.c
+@@ -247,46 +247,6 @@ out:
+ return p;
+ }
+
+-int write_dump(size_t base, size_t len, const void *data, const char *dumpfile, int add)
+-{
+- FILE *f;
+-
+- f = fopen(dumpfile, add ? "r+b" : "wb");
+- if (!f)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fopen");
+- return -1;
+- }
+-
+- if (fseek(f, base, SEEK_SET) != 0)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fseek");
+- goto err_close;
+- }
+-
+- if (fwrite(data, len, 1, f) != 1)
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fwrite");
+- goto err_close;
+- }
+-
+- if (fclose(f))
+- {
+- fprintf(stderr, "%s: ", dumpfile);
+- perror("fclose");
+- return -1;
+- }
+-
+- return 0;
+-
+-err_close:
+- fclose(f);
+- return -1;
+-}
+-
+ /* Returns end - start + 1, assuming start < end */
+ u64 u64_range(u64 start, u64 end)
+ {
+diff --git a/util.h b/util.h
+index 3094cf8..ef24eb9 100644
+--- a/util.h
++++ b/util.h
+@@ -27,5 +27,4 @@
+ int checksum(const u8 *buf, size_t len);
+ void *read_file(off_t base, size_t *len, const char *filename);
+ void *mem_chunk(off_t base, size_t len, const char *devmem);
+-int write_dump(size_t base, size_t len, const void *data, const char *dumpfile, int add);
+ u64 u64_range(u64 start, u64 end);
diff --git a/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630.patch b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630.patch
new file mode 100644
index 0000000000..bf4d060c8c
--- /dev/null
+++ b/meta/recipes-devtools/dmidecode/dmidecode/CVE-2023-30630.patch
@@ -0,0 +1,62 @@
+From b7dacccff32294ea522df32a9391d0218e7600ea Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Mon, 20 Feb 2023 14:53:31 +0100
+Subject: [PATCH] dmidecode: Do not let --dump-bin overwrite an existing file
+
+Make sure that the file passed to option --dump-bin does not already
+exist. In practice, it is rather unlikely that an honest user would
+want to overwrite an existing dump file, while this possibility
+could be used by a rogue user to corrupt a system file.
+
+CVE: CVE-2023-30630
+Upstream-Status: Backport [https://git.savannah.nongnu.org/cgit/dmidecode.git/commit/?id=6ca381c1247c]
+
+Backport Changes:
+- Ignored the changes to the man/dmidecode.8 file.
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Reviewed-by: Jerry Hoemann <jerry.hoemann@hpe.com>
+(cherry picked from commit 6ca381c1247c81f74e1ca4e7706f70bdda72e6f2)
+Signed-off-by: Dhairya Nagodra <dnagodra@cisco.com>
+
+---
+ dmidecode.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/dmidecode.c b/dmidecode.c
+index b91e53b..846d9a1 100644
+--- a/dmidecode.c
++++ b/dmidecode.c
+@@ -60,6 +60,7 @@
+ * https://www.dmtf.org/sites/default/files/DSP0270_1.0.1.pdf
+ */
+
++#include <fcntl.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <strings.h>
+@@ -5097,13 +5098,22 @@ static void dmi_table_string(const struct dmi_header *h, const u8 *data, u16 ver
+ static int dmi_table_dump(const u8 *ep, u32 ep_len, const u8 *table,
+ u32 table_len)
+ {
++ int fd;
+ FILE *f;
+
+- f = fopen(opt.dumpfile, "wb");
++ fd = open(opt.dumpfile, O_WRONLY|O_CREAT|O_EXCL, 0666);
++ if (fd == -1)
++ {
++ fprintf(stderr, "%s: ", opt.dumpfile);
++ perror("open");
++ return -1;
++ }
++
++ f = fdopen(fd, "wb");
+ if (!f)
+ {
+ fprintf(stderr, "%s: ", opt.dumpfile);
+- perror("fopen");
++ perror("fdopen");
+ return -1;
+ }
+
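The commit message above describes the fix: --dump-bin must refuse to write to a file that already exists, which the patch implements by creating the dump file with open(O_WRONLY|O_CREAT|O_EXCL) and only then wrapping the descriptor with fdopen(). A minimal stand-alone sketch of that pattern follows; the function name, file name and payload are invented for illustration and are not part of dmidecode or of this patch.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Create a dump file that must not already exist, then hand the descriptor
 * to stdio. O_EXCL makes open() fail with EEXIST instead of truncating or
 * following a file planted at that path, which is the behaviour the fix
 * relies on.
 */
static int write_new_dump(const char *path, const void *data, size_t len)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0666);
    if (fd == -1) {
        perror(path);
        return -1;
    }

    FILE *f = fdopen(fd, "wb");
    if (!f) {
        perror("fdopen");
        close(fd);
        return -1;
    }

    if (fwrite(data, len, 1, f) != 1) {
        perror("fwrite");
        fclose(f);
        return -1;
    }

    return fclose(f) ? -1 : 0;
}

int main(void)
{
    const char payload[] = "example dump contents\n";

    if (write_new_dump("dump.bin", payload, sizeof payload) != 0)
        return EXIT_FAILURE;

    /* The second attempt fails with EEXIST instead of clobbering the file. */
    if (write_new_dump("dump.bin", payload, sizeof payload) == 0)
        return EXIT_FAILURE;

    return EXIT_SUCCESS;
}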
diff --git a/meta/recipes-devtools/dmidecode/dmidecode_3.2.bb b/meta/recipes-devtools/dmidecode/dmidecode_3.2.bb
index 8caffb5cc3..1e7c38dc8a 100644
--- a/meta/recipes-devtools/dmidecode/dmidecode_3.2.bb
+++ b/meta/recipes-devtools/dmidecode/dmidecode_3.2.bb
@@ -6,6 +6,9 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=b234ee4d69f5fce4486a80fdaf4a4263"
SRC_URI = "${SAVANNAH_NONGNU_MIRROR}/dmidecode/${BP}.tar.xz \
file://0001-Committing-changes-from-do_unpack_extra.patch \
+ file://CVE-2023-30630-dependent_p1.patch \
+ file://CVE-2023-30630-dependent_p2.patch \
+ file://CVE-2023-30630.patch \
"
COMPATIBLE_HOST = "(i.86|x86_64|aarch64|arm|powerpc|powerpc64).*-linux"
diff --git a/meta/recipes-devtools/dpkg/dpkg_1.19.7.bb b/meta/recipes-devtools/dpkg/dpkg_1.19.8.bb
index e9dec337b3..9e6e9f2464 100644
--- a/meta/recipes-devtools/dpkg/dpkg_1.19.7.bb
+++ b/meta/recipes-devtools/dpkg/dpkg_1.19.8.bb
@@ -18,5 +18,5 @@ SRC_URI_append_class-native = " \
file://tweak-options-require-tar-1.27.patch \
"
-SRC_URI[md5sum] = "60f57c5494e6dfa177504d47bfa0e383"
-SRC_URI[sha256sum] = "4c27fededf620c0aa522fff1a48577ba08144445341257502e7730f2b1a296e8"
+SRC_URI[md5sum] = "9d170c8baa1aa36b09698c909f304508"
+SRC_URI[sha256sum] = "2632c00b0cf0ea19ed7bd6700e6ec5faca93f0045af629d356dc03ad74ae6f10"
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2022-1304.patch b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2022-1304.patch
new file mode 100644
index 0000000000..34e2567b25
--- /dev/null
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/CVE-2022-1304.patch
@@ -0,0 +1,42 @@
+From a66071ed6a0d1fa666d22dcb78fa6fcb3bf22df3 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 27 May 2022 14:01:50 +0530
+Subject: [PATCH] CVE-2022-1304
+
+Upstream-Status: Backport [https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/commit/?h=maint&id=ab51d587bb9b229b1fade1afd02e1574c1ba5c76]
+CVE: CVE-2022-1304
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ lib/ext2fs/extent.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/lib/ext2fs/extent.c b/lib/ext2fs/extent.c
+index ac3dbfec9..a1b1905cd 100644
+--- a/lib/ext2fs/extent.c
++++ b/lib/ext2fs/extent.c
+@@ -495,6 +495,10 @@ retry:
+ ext2fs_le16_to_cpu(eh->eh_entries);
+ newpath->max_entries = ext2fs_le16_to_cpu(eh->eh_max);
+
++ /* Make sure there is at least one extent present */
++ if (newpath->left <= 0)
++ return EXT2_ET_EXTENT_NO_DOWN;
++
+ if (path->left > 0) {
+ ix++;
+ newpath->end_blk = ext2fs_le32_to_cpu(ix->ei_block);
+@@ -1630,6 +1634,10 @@ errcode_t ext2fs_extent_delete(ext2_extent_handle_t handle, int flags)
+
+ cp = path->curr;
+
++ /* Sanity check before memmove() */
++ if (path->left < 0)
++ return EXT2_ET_EXTENT_LEAF_BAD;
++
+ if (path->left) {
+ memmove(cp, cp + sizeof(struct ext3_extent_idx),
+ path->left * sizeof(struct ext3_extent_idx));
+--
+2.25.1
+
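The extent.c hunks above add nothing more than sanity checks: path->left comes from on-disk metadata, so it has to be validated before it is used as an element count for memmove(). Below is a small self-contained sketch of the same defensive pattern; the struct, function and limits are invented for illustration and do not mirror the real ext2fs data structures.

#include <stdio.h>
#include <string.h>

struct idx_rec {
    unsigned int block;
    unsigned int leaf;
};

#define MAX_ENTRIES 8

/*
 * Delete the entry at 'pos' by shifting the following entries down.
 * 'left' plays the role of e2fsprogs' path->left: the number of entries
 * after the current one, derived from on-disk metadata. If it is negative
 * or too large, the memmove() would run outside the array, so reject it
 * first; that bounds check is the essence of the CVE fix.
 */
static int delete_entry(struct idx_rec *entries, int pos, int left)
{
    if (left < 0 || pos < 0 || pos + 1 + left > MAX_ENTRIES)
        return -1;  /* corrupt count: refuse rather than corrupt memory */

    memmove(&entries[pos], &entries[pos + 1],
            (size_t)left * sizeof(struct idx_rec));
    return 0;
}

int main(void)
{
    struct idx_rec tbl[MAX_ENTRIES] = { {1, 10}, {2, 20}, {3, 30}, {4, 40} };

    if (delete_entry(tbl, 1, 2) == 0)      /* valid: shifts entries 2..3 down */
        printf("second entry now maps block %u\n", tbl[1].block);
    if (delete_entry(tbl, 1, -5) != 0)     /* bogus count from "disk" is rejected */
        printf("negative count rejected\n");
    return 0;
}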
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest b/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
index c97c0377e9..279923db8e 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs/run-ptest
@@ -8,3 +8,4 @@ rm -f *.tmp
rm -f *.ok
rm -f *.failed
rm -f *.log
+cp ../data/test_data.tmp ./
diff --git a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.7.bb b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.7.bb
index 3bc530e02b..565c433866 100644
--- a/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.7.bb
+++ b/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.45.7.bb
@@ -6,6 +6,7 @@ SRC_URI += "file://remove.ldconfig.call.patch \
file://mkdir_p.patch \
file://0001-configure.ac-correct-AM_GNU_GETTEXT.patch \
file://0001-intl-do-not-try-to-use-gettext-defines-that-no-longe.patch \
+ file://CVE-2022-1304.patch \
"
SRC_URI_append_class-native = " file://e2fsprogs-fix-missing-check-for-permission-denied.patch \
@@ -53,6 +54,7 @@ do_install () {
oe_multilib_header ext2fs/ext2_types.h
install -d ${D}${base_bindir}
mv ${D}${bindir}/chattr ${D}${base_bindir}/chattr.e2fsprogs
+ mv ${D}${bindir}/lsattr ${D}${base_bindir}/lsattr.e2fsprogs
install -v -m 755 ${S}/contrib/populate-extfs.sh ${D}${base_sbindir}/
@@ -101,10 +103,12 @@ FILES_libe2p = "${base_libdir}/libe2p.so.*"
FILES_libext2fs = "${libdir}/e2initrd_helper ${base_libdir}/libext2fs.so.*"
FILES_${PN}-dev += "${datadir}/*/*.awk ${datadir}/*/*.sed ${base_libdir}/*.so ${bindir}/compile_et ${bindir}/mk_cmds"
-ALTERNATIVE_${PN} = "chattr"
+ALTERNATIVE_${PN} = "chattr lsattr"
ALTERNATIVE_PRIORITY = "100"
ALTERNATIVE_LINK_NAME[chattr] = "${base_bindir}/chattr"
ALTERNATIVE_TARGET[chattr] = "${base_bindir}/chattr.e2fsprogs"
+ALTERNATIVE_LINK_NAME[lsattr] = "${base_bindir}/lsattr"
+ALTERNATIVE_TARGET[lsattr] = "${base_bindir}/lsattr.e2fsprogs"
ALTERNATIVE_${PN}-doc = "fsck.8"
ALTERNATIVE_LINK_NAME[fsck.8] = "${mandir}/man8/fsck.8"
@@ -140,4 +144,7 @@ do_install_ptest() {
install -d ${D}${PTEST_PATH}/lib
install -m 0644 ${B}/lib/config.h ${D}${PTEST_PATH}/lib/
+
+ install -d ${D}${PTEST_PATH}/data
+ install -m 0644 ${B}/tests/test_data.tmp ${D}${PTEST_PATH}/data/
}
diff --git a/meta/recipes-devtools/elfutils/elfutils_0.178.bb b/meta/recipes-devtools/elfutils/elfutils_0.178.bb
index 97d033e356..29a3bbfffb 100644
--- a/meta/recipes-devtools/elfutils/elfutils_0.178.bb
+++ b/meta/recipes-devtools/elfutils/elfutils_0.178.bb
@@ -34,6 +34,7 @@ SRC_URI = "https://sourceware.org/elfutils/ftp/${PV}/${BP}.tar.bz2 \
file://0001-ppc_initreg.c-Incliude-asm-ptrace.h-for-pt_regs-defi.patch \
file://run-ptest \
file://ptest.patch \
+ file://CVE-2021-33294.patch \
"
SRC_URI_append_libc-musl = " \
file://0001-musl-obstack-fts.patch \
diff --git a/meta/recipes-devtools/elfutils/files/CVE-2021-33294.patch b/meta/recipes-devtools/elfutils/files/CVE-2021-33294.patch
new file mode 100644
index 0000000000..0500a4cf83
--- /dev/null
+++ b/meta/recipes-devtools/elfutils/files/CVE-2021-33294.patch
@@ -0,0 +1,72 @@
+From 480b6fa3662ba8ffeee274bf0d37423413c01e55 Mon Sep 17 00:00:00 2001
+From: Mark Wielaard <mark@klomp.org>
+Date: Wed, 3 Mar 2021 21:40:53 +0100
+Subject: [PATCH] readelf: Sanity check verneed and verdef offsets in handle_symtab.
+
+We are going through vna_next, vn_next and vd_next in a while loop.
+Make sure that all offsets are sane. We don't want the offsets to wrap
+around and send the loop into a cycle.
+
+https://sourceware.org/bugzilla/show_bug.cgi?id=27501
+
+Signed-off-by: Mark Wielaard <mark@klomp.org>
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=elfutils.git;a=commit;h=480b6fa3662ba8ffeee274bf0d37423413c01e55]
+CVE: CVE-2021-33294
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/ChangeLog | 5 +++++
+ src/readelf.c | 10 +++++++++-
+ 2 files changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/src/ChangeLog b/src/ChangeLog
+index 6af977e..f0d9e39 100644
+--- a/src/ChangeLog
++++ b/src/ChangeLog
+@@ -1,3 +1,8 @@
++2021-03-03 Mark Wielaard <mark@klomp.org>
++
++ * readelf.c (handle_symtab): Sanity check verneed vna_next,
++ vn_next and verdef vd_next offsets.
++
+ 2019-11-26 Mark Wielaard <mark@klomp.org>
+
+ * Makefile.am (BUILD_STATIC): Add libraries needed for libdw.
+diff --git a/src/readelf.c b/src/readelf.c
+index 5994615..ab7a1c1 100644
+--- a/src/readelf.c
++++ b/src/readelf.c
+@@ -2550,7 +2550,9 @@ handle_symtab (Ebl *ebl, Elf_Scn *scn, GElf_Shdr *shdr)
+ &vernaux_mem);
+ while (vernaux != NULL
+ && vernaux->vna_other != *versym
+- && vernaux->vna_next != 0)
++ && vernaux->vna_next != 0
++ && (verneed_data->d_size - vna_offset
++ >= vernaux->vna_next))
+ {
+ /* Update the offset. */
+ vna_offset += vernaux->vna_next;
+@@ -2567,6 +2569,9 @@ handle_symtab (Ebl *ebl, Elf_Scn *scn, GElf_Shdr *shdr)
+ /* Found it. */
+ break;
+
++ if (verneed_data->d_size - vn_offset < verneed->vn_next)
++ break;
++
+ vn_offset += verneed->vn_next;
+ verneed = (verneed->vn_next == 0
+ ? NULL
+@@ -2602,6 +2607,9 @@ handle_symtab (Ebl *ebl, Elf_Scn *scn, GElf_Shdr *shdr)
+ /* Found the definition. */
+ break;
+
++ if (verdef_data->d_size - vd_offset < verdef->vd_next)
++ break;
++
+ vd_offset += verdef->vd_next;
+ verdef = (verdef->vd_next == 0
+ ? NULL
+--
+2.25.1
+
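The readelf fix above bounds every vna_next, vn_next and vd_next step so a crafted offset cannot make the while loop wrap around and cycle. The same guard applies to any walk over offset-chained records; the stand-alone sketch below uses an invented record layout and names, not the actual elfutils structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A record that chains to the next one through a byte offset, in the same
 * spirit as the verneed/vernaux/verdef entries readelf walks. */
struct rec {
    uint32_t value;
    uint32_t next;   /* offset from this record to the next, 0 = end of chain */
};

/*
 * Walk the records inside buf[0..size). The guard "size - offset < next"
 * is the one the patch adds: any next offset larger than the bytes that
 * remain is refused, so the walk can neither run past the buffer nor wrap
 * its offset around and loop forever on crafted input.
 */
static void walk(const unsigned char *buf, size_t size)
{
    size_t offset = 0;

    while (offset + sizeof(struct rec) <= size) {
        struct rec r;

        memcpy(&r, buf + offset, sizeof r);
        printf("record at %zu: value %u\n", offset, (unsigned int)r.value);

        if (r.next == 0 || size - offset < r.next)
            break;  /* end of chain, or an offset that cannot be trusted */
        offset += r.next;
    }
}

int main(void)
{
    struct rec recs[3] = {
        { 10, sizeof(struct rec) },
        { 20, sizeof(struct rec) },
        { 30, 0 },
    };
    unsigned char buf[sizeof recs];

    memcpy(buf, recs, sizeof recs);
    walk(buf, sizeof buf);
    return 0;
}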
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0001-Backport-fix-for-PR-tree-optimization-97236-fix-bad-.patch b/meta/recipes-devtools/gcc/gcc-9.3/0001-Backport-fix-for-PR-tree-optimization-97236-fix-bad-.patch
deleted file mode 100644
index dc1039dcc8..0000000000
--- a/meta/recipes-devtools/gcc/gcc-9.3/0001-Backport-fix-for-PR-tree-optimization-97236-fix-bad-.patch
+++ /dev/null
@@ -1,119 +0,0 @@
-Upstream-Status: Backport [https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=97b668f9a8c6ec565c278a60e7d1492a6932e409]
-Signed-off-by: Jon Mason <jon.mason@arm.com>
-
-From 97b668f9a8c6ec565c278a60e7d1492a6932e409 Mon Sep 17 00:00:00 2001
-From: Matthias Klose <doko@ubuntu.com>
-Date: Tue, 6 Oct 2020 13:41:37 +0200
-Subject: [PATCH] Backport fix for PR/tree-optimization/97236 - fix bad use of
- VMAT_CONTIGUOUS
-
-This avoids using VMAT_CONTIGUOUS with single-element interleaving
-when using V1mode vectors. Instead keep VMAT_ELEMENTWISE but
-continue to avoid load-lanes and gathers.
-
-2020-10-01 Richard Biener <rguenther@suse.de>
-
- PR tree-optimization/97236
- * tree-vect-stmts.c (get_group_load_store_type): Keep
- VMAT_ELEMENTWISE for single-element vectors.
-
- * gcc.dg/vect/pr97236.c: New testcase.
-
-(cherry picked from commit 1ab88985631dd2c5a5e3b5c0dce47cf8b6ed2f82)
----
- gcc/testsuite/gcc.dg/vect/pr97236.c | 43 +++++++++++++++++++++++++++++
- gcc/tree-vect-stmts.c | 20 ++++++--------
- 2 files changed, 52 insertions(+), 11 deletions(-)
- create mode 100644 gcc/testsuite/gcc.dg/vect/pr97236.c
-
-diff --git a/gcc/testsuite/gcc.dg/vect/pr97236.c b/gcc/testsuite/gcc.dg/vect/pr97236.c
-new file mode 100644
-index 000000000000..9d3dc20d953d
---- /dev/null
-+++ b/gcc/testsuite/gcc.dg/vect/pr97236.c
-@@ -0,0 +1,43 @@
-+typedef unsigned char __uint8_t;
-+typedef __uint8_t uint8_t;
-+typedef struct plane_t {
-+ uint8_t *p_pixels;
-+ int i_lines;
-+ int i_pitch;
-+} plane_t;
-+
-+typedef struct {
-+ plane_t p[5];
-+} picture_t;
-+
-+#define N 4
-+
-+void __attribute__((noipa))
-+picture_Clone(picture_t *picture, picture_t *res)
-+{
-+ for (int i = 0; i < N; i++) {
-+ res->p[i].p_pixels = picture->p[i].p_pixels;
-+ res->p[i].i_lines = picture->p[i].i_lines;
-+ res->p[i].i_pitch = picture->p[i].i_pitch;
-+ }
-+}
-+
-+int
-+main()
-+{
-+ picture_t aaa, bbb;
-+ uint8_t pixels[10] = {1, 1, 1, 1, 1, 1, 1, 1};
-+
-+ for (unsigned i = 0; i < N; i++)
-+ aaa.p[i].p_pixels = pixels;
-+
-+ picture_Clone (&aaa, &bbb);
-+
-+ uint8_t c = 0;
-+ for (unsigned i = 0; i < N; i++)
-+ c += bbb.p[i].p_pixels[0];
-+
-+ if (c != N)
-+ __builtin_abort ();
-+ return 0;
-+}
-diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
-index 507f81b0a0e8..ffbba3441de2 100644
---- a/gcc/tree-vect-stmts.c
-+++ b/gcc/tree-vect-stmts.c
-@@ -2355,25 +2355,23 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
- /* First cope with the degenerate case of a single-element
- vector. */
- if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
-- *memory_access_type = VMAT_CONTIGUOUS;
-+ ;
-
- /* Otherwise try using LOAD/STORE_LANES. */
-- if (*memory_access_type == VMAT_ELEMENTWISE
-- && (vls_type == VLS_LOAD
-- ? vect_load_lanes_supported (vectype, group_size, masked_p)
-- : vect_store_lanes_supported (vectype, group_size,
-- masked_p)))
-+ else if (vls_type == VLS_LOAD
-+ ? vect_load_lanes_supported (vectype, group_size, masked_p)
-+ : vect_store_lanes_supported (vectype, group_size,
-+ masked_p))
- {
- *memory_access_type = VMAT_LOAD_STORE_LANES;
- overrun_p = would_overrun_p;
- }
-
- /* If that fails, try using permuting loads. */
-- if (*memory_access_type == VMAT_ELEMENTWISE
-- && (vls_type == VLS_LOAD
-- ? vect_grouped_load_supported (vectype, single_element_p,
-- group_size)
-- : vect_grouped_store_supported (vectype, group_size)))
-+ else if (vls_type == VLS_LOAD
-+ ? vect_grouped_load_supported (vectype, single_element_p,
-+ group_size)
-+ : vect_grouped_store_supported (vectype, group_size))
- {
- *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
- overrun_p = would_overrun_p;
---
-2.20.1
-
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch b/meta/recipes-devtools/gcc/gcc-9.3/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch
deleted file mode 100644
index a7e29f4bd7..0000000000
--- a/meta/recipes-devtools/gcc/gcc-9.3/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch
+++ /dev/null
@@ -1,204 +0,0 @@
-CVE: CVE-2020-13844
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@arm.com>
-
-From 20da13e395bde597d8337167c712039c8f923c3b Mon Sep 17 00:00:00 2001
-From: Matthew Malcomson <matthew.malcomson@arm.com>
-Date: Thu, 9 Jul 2020 09:11:58 +0100
-Subject: [PATCH 1/3] aarch64: New Straight Line Speculation (SLS) mitigation
- flags
-
-Here we introduce the flags that will be used for straight line speculation.
-
-The new flag introduced is `-mharden-sls=`.
-This flag can take arguments of `none`, `all`, or a comma-separated list
-of one or more of `retbr` or `blr`.
-`none` indicates no special mitigation of the straight line speculation
-vulnerability.
-`all` requests all mitigations currently implemented.
-`retbr` requests that the RET and BR instructions have a speculation
-barrier inserted after them.
-`blr` requests that BLR instructions are replaced by a BL to a function
-stub using a BR with a speculation barrier after it.
-
-Setting this on a per-function basis using attributes or the like is not
-enabled, but may be in the future.
-
-(cherry picked from commit a9ba2a9b77bec7eacaf066801f22d1c366a2bc86)
-
-gcc/ChangeLog:
-
-2020-06-02 Matthew Malcomson <matthew.malcomson@arm.com>
-
- * config/aarch64/aarch64-protos.h (aarch64_harden_sls_retbr_p):
- New.
- (aarch64_harden_sls_blr_p): New.
- * config/aarch64/aarch64.c (enum aarch64_sls_hardening_type):
- New.
- (aarch64_harden_sls_retbr_p): New.
- (aarch64_harden_sls_blr_p): New.
- (aarch64_validate_sls_mitigation): New.
- (aarch64_override_options): Parse options for SLS mitigation.
- * config/aarch64/aarch64.opt (-mharden-sls): New option.
- * doc/invoke.texi: Document new option.
----
- gcc/config/aarch64/aarch64-protos.h | 3 ++
- gcc/config/aarch64/aarch64.c | 76 +++++++++++++++++++++++++++++
- gcc/config/aarch64/aarch64.opt | 4 ++
- gcc/doc/invoke.texi | 12 +++++
- 4 files changed, 95 insertions(+)
-
-diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
-index c083cad53..31493f412 100644
---- a/gcc/config/aarch64/aarch64-protos.h
-+++ b/gcc/config/aarch64/aarch64-protos.h
-@@ -644,4 +644,7 @@ poly_uint64 aarch64_regmode_natural_size (machine_mode);
-
- bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);
-
-+extern bool aarch64_harden_sls_retbr_p (void);
-+extern bool aarch64_harden_sls_blr_p (void);
-+
- #endif /* GCC_AARCH64_PROTOS_H */
-diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
-index b452a53af..269ff6c92 100644
---- a/gcc/config/aarch64/aarch64.c
-+++ b/gcc/config/aarch64/aarch64.c
-@@ -11734,6 +11734,79 @@ aarch64_validate_mcpu (const char *str, const struct processor **res,
- return false;
- }
-
-+/* Straight line speculation indicators. */
-+enum aarch64_sls_hardening_type
-+{
-+ SLS_NONE = 0,
-+ SLS_RETBR = 1,
-+ SLS_BLR = 2,
-+ SLS_ALL = 3,
-+};
-+static enum aarch64_sls_hardening_type aarch64_sls_hardening;
-+
-+/* Return whether we should mitigate Straight Line Speculation for the RET
-+ and BR instructions. */
-+bool
-+aarch64_harden_sls_retbr_p (void)
-+{
-+ return aarch64_sls_hardening & SLS_RETBR;
-+}
-+
-+/* Return whether we should mitigate Straight Line Speculation for the BLR
-+ instruction. */
-+bool
-+aarch64_harden_sls_blr_p (void)
-+{
-+ return aarch64_sls_hardening & SLS_BLR;
-+}
-+
-+/* As of yet we only allow setting these options globally, in the future we may
-+ allow setting them per function. */
-+static void
-+aarch64_validate_sls_mitigation (const char *const_str)
-+{
-+ char *token_save = NULL;
-+ char *str = NULL;
-+
-+ if (strcmp (const_str, "none") == 0)
-+ {
-+ aarch64_sls_hardening = SLS_NONE;
-+ return;
-+ }
-+ if (strcmp (const_str, "all") == 0)
-+ {
-+ aarch64_sls_hardening = SLS_ALL;
-+ return;
-+ }
-+
-+ char *str_root = xstrdup (const_str);
-+ str = strtok_r (str_root, ",", &token_save);
-+ if (!str)
-+ error ("invalid argument given to %<-mharden-sls=%>");
-+
-+ int temp = SLS_NONE;
-+ while (str)
-+ {
-+ if (strcmp (str, "blr") == 0)
-+ temp |= SLS_BLR;
-+ else if (strcmp (str, "retbr") == 0)
-+ temp |= SLS_RETBR;
-+ else if (strcmp (str, "none") == 0 || strcmp (str, "all") == 0)
-+ {
-+ error ("%<%s%> must be by itself for %<-mharden-sls=%>", str);
-+ break;
-+ }
-+ else
-+ {
-+ error ("invalid argument %<%s%> for %<-mharden-sls=%>", str);
-+ break;
-+ }
-+ str = strtok_r (NULL, ",", &token_save);
-+ }
-+ aarch64_sls_hardening = (aarch64_sls_hardening_type) temp;
-+ free (str_root);
-+}
-+
- /* Parses CONST_STR for branch protection features specified in
- aarch64_branch_protect_types, and set any global variables required. Returns
- the parsing result and assigns LAST_STR to the last processed token from
-@@ -11972,6 +12045,9 @@ aarch64_override_options (void)
- selected_arch = NULL;
- selected_tune = NULL;
-
-+ if (aarch64_harden_sls_string)
-+ aarch64_validate_sls_mitigation (aarch64_harden_sls_string);
-+
- if (aarch64_branch_protection_string)
- aarch64_validate_mbranch_protection (aarch64_branch_protection_string);
-
-diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
-index 3c6d1cc90..d27ab6df8 100644
---- a/gcc/config/aarch64/aarch64.opt
-+++ b/gcc/config/aarch64/aarch64.opt
-@@ -71,6 +71,10 @@ mgeneral-regs-only
- Target Report RejectNegative Mask(GENERAL_REGS_ONLY) Save
- Generate code which uses only the general registers.
-
-+mharden-sls=
-+Target RejectNegative Joined Var(aarch64_harden_sls_string)
-+Generate code to mitigate against straight line speculation.
-+
- mfix-cortex-a53-835769
- Target Report Var(aarch64_fix_a53_err835769) Init(2) Save
- Workaround for ARM Cortex-A53 Erratum number 835769.
-diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
-index 2f7ffe456..5f04a7d2b 100644
---- a/gcc/doc/invoke.texi
-+++ b/gcc/doc/invoke.texi
-@@ -638,6 +638,7 @@ Objective-C and Objective-C++ Dialects}.
- -mpc-relative-literal-loads @gol
- -msign-return-address=@var{scope} @gol
- -mbranch-protection=@var{none}|@var{standard}|@var{pac-ret}[+@var{leaf}]|@var{bti} @gol
-+-mharden-sls=@var{opts} @gol
- -march=@var{name} -mcpu=@var{name} -mtune=@var{name} @gol
- -moverride=@var{string} -mverbose-cost-dump @gol
- -mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{sysreg} @gol
-@@ -15955,6 +15956,17 @@ argument @samp{leaf} can be used to extend the signing to include leaf
- functions.
- @samp{bti} turns on branch target identification mechanism.
-
-+@item -mharden-sls=@var{opts}
-+@opindex mharden-sls
-+Enable compiler hardening against straight line speculation (SLS).
-+@var{opts} is a comma-separated list of the following options:
-+@table @samp
-+@item retbr
-+@item blr
-+@end table
-+In addition, @samp{-mharden-sls=all} enables all SLS hardening while
-+@samp{-mharden-sls=none} disables all SLS hardening.
-+
- @item -msve-vector-bits=@var{bits}
- @opindex msve-vector-bits
- Specify the number of bits in an SVE vector register. This option only has
---
-2.25.1
-
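The removed patch above documents the -mharden-sls= syntax (none, all, or a comma-separated list of retbr and blr) and parses it into a bitmask with strtok_r. As a rough stand-alone illustration of that option-parsing shape, with everything outside the documented flag names simplified and hypothetical, not taken from GCC:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { SLS_NONE = 0, SLS_RETBR = 1, SLS_BLR = 2, SLS_ALL = 3 };

/*
 * Parse "none", "all", or a comma-separated list of "retbr"/"blr" into a
 * bitmask, loosely following the shape of aarch64_validate_sls_mitigation
 * in the removed patch. Returns -1 for input the real compiler rejects
 * with error().
 */
static int parse_harden_sls(const char *arg)
{
    if (strcmp(arg, "none") == 0)
        return SLS_NONE;
    if (strcmp(arg, "all") == 0)
        return SLS_ALL;

    char *copy = strdup(arg);
    if (!copy)
        return -1;

    int mask = SLS_NONE;
    char *save = NULL;

    for (char *tok = strtok_r(copy, ",", &save); tok != NULL;
         tok = strtok_r(NULL, ",", &save)) {
        if (strcmp(tok, "retbr") == 0)
            mask |= SLS_RETBR;
        else if (strcmp(tok, "blr") == 0)
            mask |= SLS_BLR;
        else {
            mask = -1;  /* typo, or "none"/"all" mixed into a list */
            break;
        }
    }

    free(copy);
    return mask;
}

int main(void)
{
    printf("retbr,blr -> %d\n", parse_harden_sls("retbr,blr"));  /* 3 */
    printf("retbr     -> %d\n", parse_harden_sls("retbr"));      /* 1 */
    printf("bogus     -> %d\n", parse_harden_sls("bogus"));      /* -1 */
    return 0;
}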
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch b/meta/recipes-devtools/gcc/gcc-9.3/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch
deleted file mode 100644
index c972088d2b..0000000000
--- a/meta/recipes-devtools/gcc/gcc-9.3/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch
+++ /dev/null
@@ -1,600 +0,0 @@
-CVE: CVE-2020-13844
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@arm.com>
-
-From dc586a749228ecfb71f72ec2ca10e6f7b6874af3 Mon Sep 17 00:00:00 2001
-From: Matthew Malcomson <matthew.malcomson@arm.com>
-Date: Thu, 9 Jul 2020 09:11:59 +0100
-Subject: [PATCH 2/3] aarch64: Introduce SLS mitigation for RET and BR
- instructions
-
-Instructions following RET or BR are not necessarily executed. In order
-to avoid speculation past RET and BR we can simply append a speculation
-barrier.
-
-Since these speculation barriers will not be architecturally executed,
-they are not expected to add a high performance penalty.
-
-The speculation barrier is to be SB when targeting architectures which
-have this enabled, and DSB SY + ISB otherwise.
-
-We add tests for each of the cases where such an instruction was seen.
-
-This is implemented by modifying each machine description pattern that
-emits either a RET or a BR instruction. We choose not to use something
-like `TARGET_ASM_FUNCTION_EPILOGUE` since it does not affect the
-`indirect_jump`, `jump`, `sibcall_insn` and `sibcall_value_insn`
-patterns and we find it preferable to implement the functionality in the
-same way for every pattern.
-
-There is one particular case which is slightly tricky. The
-implementation of TARGET_ASM_TRAMPOLINE_TEMPLATE uses a BR which needs
-to be mitigated against. The trampoline template is used *once* per
-compilation unit, and the TRAMPOLINE_SIZE is exposed to the user via the
-builtin macro __LIBGCC_TRAMPOLINE_SIZE__.
-In the future we may implement function specific attributes to turn on
-and off hardening on a per-function basis.
-The fixed nature of the trampoline described above implies it will be
-safer to ensure this speculation barrier is always used.
-
-Testing:
- Bootstrap and regtest done on aarch64-none-linux
- Used a temporary hack(1) to use these options on every test in the
- testsuite and a script to check that the output never emitted an
- unmitigated RET or BR.
-
-1) Temporary hack was a change to the testsuite to always use
-`-save-temps` and run a script on the assembly output of those
-compilations which produced one to ensure every RET or BR is immediately
-followed by a speculation barrier.
-
-(cherry picked from be178ecd5ac1fe1510d960ff95c66d0ff831afe1)
-
-gcc/ChangeLog:
-
- * config/aarch64/aarch64-protos.h (aarch64_sls_barrier): New.
- * config/aarch64/aarch64.c (aarch64_output_casesi): Emit
- speculation barrier after BR instruction if needs be.
- (aarch64_trampoline_init): Handle ptr_mode value & adjust size
- of code copied.
- (aarch64_sls_barrier): New.
- (aarch64_asm_trampoline_template): Add needed barriers.
- * config/aarch64/aarch64.h (AARCH64_ISA_SB): New.
- (TARGET_SB): New.
- (TRAMPOLINE_SIZE): Account for barrier.
- * config/aarch64/aarch64.md (indirect_jump, *casesi_dispatch,
- simple_return, *do_return, *sibcall_insn, *sibcall_value_insn):
- Emit barrier if needs be, also account for possible barrier using
- "sls_length" attribute.
- (sls_length): New attribute.
- (length): Determine default using any non-default sls_length
- value.
-
-gcc/testsuite/ChangeLog:
-
- * gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c: New test.
- * gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c:
- New test.
- * gcc.target/aarch64/sls-mitigation/sls-mitigation.exp: New file.
- * lib/target-supports.exp (check_effective_target_aarch64_asm_sb_ok):
- New proc.
----
- gcc/config/aarch64/aarch64-protos.h | 1 +
- gcc/config/aarch64/aarch64.c | 41 +++++-
- gcc/config/aarch64/aarch64.h | 10 +-
- gcc/config/aarch64/aarch64.md | 75 ++++++++---
- .../sls-mitigation/sls-miti-retbr-pacret.c | 15 +++
- .../aarch64/sls-mitigation/sls-miti-retbr.c | 119 ++++++++++++++++++
- .../aarch64/sls-mitigation/sls-mitigation.exp | 73 +++++++++++
- gcc/testsuite/lib/target-supports.exp | 3 +-
- 8 files changed, 312 insertions(+), 25 deletions(-)
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp
-
-diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
-index 31493f412..885eae893 100644
---- a/gcc/config/aarch64/aarch64-protos.h
-+++ b/gcc/config/aarch64/aarch64-protos.h
-@@ -644,6 +644,7 @@ poly_uint64 aarch64_regmode_natural_size (machine_mode);
-
- bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);
-
-+const char *aarch64_sls_barrier (int);
- extern bool aarch64_harden_sls_retbr_p (void);
- extern bool aarch64_harden_sls_blr_p (void);
-
-diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
-index 269ff6c92..dff61105c 100644
---- a/gcc/config/aarch64/aarch64.c
-+++ b/gcc/config/aarch64/aarch64.c
-@@ -8412,8 +8412,8 @@ aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
- static void
- aarch64_asm_trampoline_template (FILE *f)
- {
-- int offset1 = 16;
-- int offset2 = 20;
-+ int offset1 = 24;
-+ int offset2 = 28;
-
- if (aarch64_bti_enabled ())
- {
-@@ -8436,6 +8436,17 @@ aarch64_asm_trampoline_template (FILE *f)
- }
- asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]);
-
-+ /* We always emit a speculation barrier.
-+ This is because the same trampoline template is used for every nested
-+ function. Since nested functions are not particularly common or
-+ performant we don't worry too much about the extra instructions to copy
-+ around.
-+ This is not yet a problem, since we have not yet implemented function
-+ specific attributes to choose between hardening against straight line
-+ speculation or not, but such function specific attributes are likely to
-+ happen in the future. */
-+ asm_fprintf (f, "\tdsb\tsy\n\tisb\n");
-+
- /* The trampoline needs an extra padding instruction. In case if BTI is
- enabled the padding instruction is replaced by the BTI instruction at
- the beginning. */
-@@ -8450,10 +8461,14 @@ static void
- aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
- {
- rtx fnaddr, mem, a_tramp;
-- const int tramp_code_sz = 16;
-+ const int tramp_code_sz = 24;
-
- /* Don't need to copy the trailing D-words, we fill those in below. */
-- emit_block_move (m_tramp, assemble_trampoline_template (),
-+ /* We create our own memory address in Pmode so that `emit_block_move` can
-+ use parts of the backend which expect Pmode addresses. */
-+ rtx temp = convert_memory_address (Pmode, XEXP (m_tramp, 0));
-+ emit_block_move (gen_rtx_MEM (BLKmode, temp),
-+ assemble_trampoline_template (),
- GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL);
- mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz);
- fnaddr = XEXP (DECL_RTL (fndecl), 0);
-@@ -8640,6 +8655,8 @@ aarch64_output_casesi (rtx *operands)
- output_asm_insn (buf, operands);
- output_asm_insn (patterns[index][1], operands);
- output_asm_insn ("br\t%3", operands);
-+ output_asm_insn (aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()),
-+ operands);
- assemble_label (asm_out_file, label);
- return "";
- }
-@@ -18976,6 +18993,22 @@ aarch64_file_end_indicate_exec_stack ()
- #undef GNU_PROPERTY_AARCH64_FEATURE_1_BTI
- #undef GNU_PROPERTY_AARCH64_FEATURE_1_AND
-
-+/* Helper function for straight line speculation.
-+ Return what barrier should be emitted for straight line speculation
-+ mitigation.
-+ When not mitigating against straight line speculation this function returns
-+ an empty string.
-+ When mitigating against straight line speculation, use:
-+ * SB when the v8.5-A SB extension is enabled.
-+ * DSB+ISB otherwise. */
-+const char *
-+aarch64_sls_barrier (int mitigation_required)
-+{
-+ return mitigation_required
-+ ? (TARGET_SB ? "sb" : "dsb\tsy\n\tisb")
-+ : "";
-+}
-+
- /* Target-specific selftests. */
-
- #if CHECKING_P
-diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
-index 772a97296..72ddc6fd9 100644
---- a/gcc/config/aarch64/aarch64.h
-+++ b/gcc/config/aarch64/aarch64.h
-@@ -235,6 +235,7 @@ extern unsigned aarch64_architecture_version;
- #define AARCH64_ISA_F16FML (aarch64_isa_flags & AARCH64_FL_F16FML)
- #define AARCH64_ISA_RCPC8_4 (aarch64_isa_flags & AARCH64_FL_RCPC8_4)
- #define AARCH64_ISA_V8_5 (aarch64_isa_flags & AARCH64_FL_V8_5)
-+#define AARCH64_ISA_SB (aarch64_isa_flags & AARCH64_FL_SB)
-
- /* Crypto is an optional extension to AdvSIMD. */
- #define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO)
-@@ -285,6 +286,9 @@ extern unsigned aarch64_architecture_version;
- #define TARGET_FIX_ERR_A53_835769_DEFAULT 1
- #endif
-
-+/* SB instruction is enabled through +sb. */
-+#define TARGET_SB (AARCH64_ISA_SB)
-+
- /* Apply the workaround for Cortex-A53 erratum 835769. */
- #define TARGET_FIX_ERR_A53_835769 \
- ((aarch64_fix_a53_err835769 == 2) \
-@@ -931,8 +935,10 @@ typedef struct
-
- #define RETURN_ADDR_RTX aarch64_return_addr
-
--/* BTI c + 3 insns + 2 pointer-sized entries. */
--#define TRAMPOLINE_SIZE (TARGET_ILP32 ? 24 : 32)
-+/* BTI c + 3 insns
-+ + sls barrier of DSB + ISB.
-+ + 2 pointer-sized entries. */
-+#define TRAMPOLINE_SIZE (24 + (TARGET_ILP32 ? 8 : 16))
-
- /* Trampolines contain dwords, so must be dword aligned. */
- #define TRAMPOLINE_ALIGNMENT 64
-diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index cc5a887d4..494aee964 100644
---- a/gcc/config/aarch64/aarch64.md
-+++ b/gcc/config/aarch64/aarch64.md
-@@ -331,10 +331,25 @@
- ;; Attribute that specifies whether the alternative uses MOVPRFX.
- (define_attr "movprfx" "no,yes" (const_string "no"))
-
-+;; Attribute to specify that an alternative has the length of a single
-+;; instruction plus a speculation barrier.
-+(define_attr "sls_length" "none,retbr,casesi" (const_string "none"))
-+
- (define_attr "length" ""
- (cond [(eq_attr "movprfx" "yes")
- (const_int 8)
-- ] (const_int 4)))
-+
-+ (eq_attr "sls_length" "retbr")
-+ (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 4)
-+ (match_test "TARGET_SB") (const_int 8)]
-+ (const_int 12))
-+
-+ (eq_attr "sls_length" "casesi")
-+ (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 16)
-+ (match_test "TARGET_SB") (const_int 20)]
-+ (const_int 24))
-+ ]
-+ (const_int 4)))
-
- ;; Strictly for compatibility with AArch32 in pipeline models, since AArch64 has
- ;; no predicated insns.
-@@ -370,8 +385,12 @@
- (define_insn "indirect_jump"
- [(set (pc) (match_operand:DI 0 "register_operand" "r"))]
- ""
-- "br\\t%0"
-- [(set_attr "type" "branch")]
-+ {
-+ output_asm_insn ("br\\t%0", operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
-+ }
-+ [(set_attr "type" "branch")
-+ (set_attr "sls_length" "retbr")]
- )
-
- (define_insn "jump"
-@@ -657,7 +676,7 @@
- "*
- return aarch64_output_casesi (operands);
- "
-- [(set_attr "length" "16")
-+ [(set_attr "sls_length" "casesi")
- (set_attr "type" "branch")]
- )
-
-@@ -736,14 +755,18 @@
- [(return)]
- ""
- {
-+ const char *ret = NULL;
- if (aarch64_return_address_signing_enabled ()
- && TARGET_ARMV8_3
- && !crtl->calls_eh_return)
-- return "retaa";
--
-- return "ret";
-+ ret = "retaa";
-+ else
-+ ret = "ret";
-+ output_asm_insn (ret, operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
- }
-- [(set_attr "type" "branch")]
-+ [(set_attr "type" "branch")
-+ (set_attr "sls_length" "retbr")]
- )
-
- (define_expand "return"
-@@ -755,8 +778,12 @@
- (define_insn "simple_return"
- [(simple_return)]
- "aarch64_use_simple_return_insn_p ()"
-- "ret"
-- [(set_attr "type" "branch")]
-+ {
-+ output_asm_insn ("ret", operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
-+ }
-+ [(set_attr "type" "branch")
-+ (set_attr "sls_length" "retbr")]
- )
-
- (define_insn "*cb<optab><mode>1"
-@@ -947,10 +974,16 @@
- (match_operand 1 "" ""))
- (return)]
- "SIBLING_CALL_P (insn)"
-- "@
-- br\\t%0
-- b\\t%c0"
-- [(set_attr "type" "branch, branch")]
-+ {
-+ if (which_alternative == 0)
-+ {
-+ output_asm_insn ("br\\t%0", operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
-+ }
-+ return "b\\t%c0";
-+ }
-+ [(set_attr "type" "branch, branch")
-+ (set_attr "sls_length" "retbr,none")]
- )
-
- (define_insn "*sibcall_value_insn"
-@@ -960,10 +993,16 @@
- (match_operand 2 "" "")))
- (return)]
- "SIBLING_CALL_P (insn)"
-- "@
-- br\\t%1
-- b\\t%c1"
-- [(set_attr "type" "branch, branch")]
-+ {
-+ if (which_alternative == 0)
-+ {
-+ output_asm_insn ("br\\t%1", operands);
-+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ());
-+ }
-+ return "b\\t%c1";
-+ }
-+ [(set_attr "type" "branch, branch")
-+ (set_attr "sls_length" "retbr,none")]
- )
-
- ;; Call subroutine returning any type.
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c
-new file mode 100644
-index 000000000..7656123ee
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c
-@@ -0,0 +1,15 @@
-+/* Avoid ILP32 since pacret is only available for LP64 */
-+/* { dg-do compile { target { ! ilp32 } } } */
-+/* { dg-additional-options "-mharden-sls=retbr -mbranch-protection=pac-ret -march=armv8.3-a" } */
-+
-+/* Testing the do_return pattern for retaa. */
-+long retbr_subcall(void);
-+long retbr_do_return_retaa(void)
-+{
-+ return retbr_subcall()+1;
-+}
-+
-+/* Ensure there are no BR or RET instructions which are not directly followed
-+ by a speculation barrier. */
-+/* { dg-final { scan-assembler-not {\t(br|ret|retaa)\tx[0-9][0-9]?\n\t(?!dsb\tsy\n\tisb)} } } */
-+/* { dg-final { scan-assembler-not {ret\t} } } */
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c
-new file mode 100644
-index 000000000..573b30cdc
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c
-@@ -0,0 +1,119 @@
-+/* We ensure that -Wpedantic is off since it complains about the trampolines
-+ we explicitly want to test. */
-+/* { dg-additional-options "-mharden-sls=retbr -Wno-pedantic " } */
-+/*
-+ Ensure that the SLS hardening of RET and BR leaves no unprotected RET/BR
-+ instructions.
-+ */
-+typedef int (foo) (int, int);
-+typedef void (bar) (int, int);
-+struct sls_testclass {
-+ foo *x;
-+ bar *y;
-+ int left;
-+ int right;
-+};
-+
-+int
-+retbr_sibcall_value_insn (struct sls_testclass x)
-+{
-+ return x.x(x.left, x.right);
-+}
-+
-+void
-+retbr_sibcall_insn (struct sls_testclass x)
-+{
-+ x.y(x.left, x.right);
-+}
-+
-+/* Aim to test two different returns.
-+ One that introduces a tail call in the middle of the function, and one that
-+ has a normal return. */
-+int
-+retbr_multiple_returns (struct sls_testclass x)
-+{
-+ int temp;
-+ if (x.left % 10)
-+ return x.x(x.left, 100);
-+ else if (x.right % 20)
-+ {
-+ return x.x(x.left * x.right, 100);
-+ }
-+ temp = x.left % x.right;
-+ temp *= 100;
-+ temp /= 2;
-+ return temp % 3;
-+}
-+
-+void
-+retbr_multiple_returns_void (struct sls_testclass x)
-+{
-+ if (x.left % 10)
-+ {
-+ x.y(x.left, 100);
-+ }
-+ else if (x.right % 20)
-+ {
-+ x.y(x.left * x.right, 100);
-+ }
-+ return;
-+}
-+
-+/* Testing the casesi jump via register. */
-+__attribute__ ((optimize ("Os")))
-+int
-+retbr_casesi_dispatch (struct sls_testclass x)
-+{
-+ switch (x.left)
-+ {
-+ case -5:
-+ return -2;
-+ case -3:
-+ return -1;
-+ case 0:
-+ return 0;
-+ case 3:
-+ return 1;
-+ case 5:
-+ break;
-+ default:
-+ __builtin_unreachable ();
-+ }
-+ return x.right;
-+}
-+
-+/* Testing the BR in trampolines is mitigated against. */
-+void f1 (void *);
-+void f3 (void *, void (*)(void *));
-+void f2 (void *);
-+
-+int
-+retbr_trampolines (void *a, int b)
-+{
-+ if (!b)
-+ {
-+ f1 (a);
-+ return 1;
-+ }
-+ if (b)
-+ {
-+ void retbr_tramp_internal (void *c)
-+ {
-+ if (c == a)
-+ f2 (c);
-+ }
-+ f3 (a, retbr_tramp_internal);
-+ }
-+ return 0;
-+}
-+
-+/* Testing the indirect_jump pattern. */
-+void
-+retbr_indirect_jump (int *buf)
-+{
-+ __builtin_longjmp(buf, 1);
-+}
-+
-+/* Ensure there are no BR or RET instructions which are not directly followed
-+ by a speculation barrier. */
-+/* { dg-final { scan-assembler-not {\t(br|ret|retaa)\tx[0-9][0-9]?\n\t(?!dsb\tsy\n\tisb|sb)} } } */
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp
-new file mode 100644
-index 000000000..812250379
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp
-@@ -0,0 +1,73 @@
-+# Regression driver for SLS mitigation on AArch64.
-+# Copyright (C) 2020 Free Software Foundation, Inc.
-+# Contributed by ARM Ltd.
-+#
-+# This file is part of GCC.
-+#
-+# GCC is free software; you can redistribute it and/or modify it
-+# under the terms of the GNU General Public License as published by
-+# the Free Software Foundation; either version 3, or (at your option)
-+# any later version.
-+#
-+# GCC is distributed in the hope that it will be useful, but
-+# WITHOUT ANY WARRANTY; without even the implied warranty of
-+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+# General Public License for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with GCC; see the file COPYING3. If not see
-+# <http://www.gnu.org/licenses/>. */
-+
-+# Exit immediately if this isn't an AArch64 target.
-+if {![istarget aarch64*-*-*] } then {
-+ return
-+}
-+
-+# Load support procs.
-+load_lib gcc-dg.exp
-+load_lib torture-options.exp
-+
-+# If a testcase doesn't have special options, use these.
-+global DEFAULT_CFLAGS
-+if ![info exists DEFAULT_CFLAGS] then {
-+ set DEFAULT_CFLAGS " "
-+}
-+
-+# Initialize `dg'.
-+dg-init
-+torture-init
-+
-+# Use different architectures as well as the normal optimisation options.
-+# (i.e. use both SB and DSB+ISB barriers).
-+
-+set save-dg-do-what-default ${dg-do-what-default}
-+# Main loop.
-+# Run with torture tests (i.e. a bunch of different optimisation levels) just
-+# to increase test coverage.
-+set dg-do-what-default assemble
-+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
-+ "-save-temps" $DEFAULT_CFLAGS
-+
-+# Run the same tests but this time with SB extension.
-+# Since not all supported assemblers will support that extension we decide
-+# whether to assemble or just compile based on whether the extension is
-+# supported for the available assembler.
-+
-+set templist {}
-+foreach x $DG_TORTURE_OPTIONS {
-+ lappend templist "$x -march=armv8.3-a+sb "
-+ lappend templist "$x -march=armv8-a+sb "
-+}
-+set-torture-options $templist
-+if { [check_effective_target_aarch64_asm_sb_ok] } {
-+ set dg-do-what-default assemble
-+} else {
-+ set dg-do-what-default compile
-+}
-+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
-+ "-save-temps" $DEFAULT_CFLAGS
-+set dg-do-what-default ${save-dg-do-what-default}
-+
-+# All done.
-+torture-finish
-+dg-finish
-diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
-index ea9a50ccb..79482f9b6 100644
---- a/gcc/testsuite/lib/target-supports.exp
-+++ b/gcc/testsuite/lib/target-supports.exp
-@@ -8579,7 +8579,8 @@ proc check_effective_target_aarch64_tiny { } {
- # Create functions to check that the AArch64 assembler supports the
- # various architecture extensions via the .arch_extension pseudo-op.
-
--foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"} {
-+foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"
-+ "sb"} {
- eval [string map [list FUNC $aarch64_ext] {
- proc check_effective_target_aarch64_asm_FUNC_ok { } {
- if { [istarget aarch64*-*-*] } {
---
-2.25.1
-
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch b/meta/recipes-devtools/gcc/gcc-9.3/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch
deleted file mode 100644
index 6dffef0a34..0000000000
--- a/meta/recipes-devtools/gcc/gcc-9.3/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch
+++ /dev/null
@@ -1,659 +0,0 @@
-CVE: CVE-2020-13844
-Upstream-Status: Backport
-Signed-off-by: Ross Burton <ross.burton@arm.com>
-
-From 2155170525f93093b90a1a065e7ed71a925566e9 Mon Sep 17 00:00:00 2001
-From: Matthew Malcomson <matthew.malcomson@arm.com>
-Date: Thu, 9 Jul 2020 09:11:59 +0100
-Subject: [PATCH 3/3] aarch64: Mitigate SLS for BLR instruction
-
-This patch introduces the mitigation for Straight Line Speculation past
-the BLR instruction.
-
-This mitigation replaces BLR instructions with a BL to a stub which uses
-a BR to jump to the original value. These function stubs are then
-appended with a speculation barrier to ensure no straight line
-speculation happens after these jumps.
-
-When optimising for speed we use a set of stubs for each function since
-this should help the branch predictor make more accurate predictions
-about where a stub should branch.
-
-When optimising for size we use one set of stubs for all functions.
-This set of stubs can have human readable names, and we are using
-`__call_indirect_x<N>` for register x<N>.
-
-When BTI branch protection is enabled the BLR instruction can jump to a
-`BTI c` instruction using any register, while the BR instruction can
-only jump to a `BTI c` instruction using the x16 or x17 registers.
-Hence, in order to ensure this transformation is safe we mov the value
-of the original register into x16 and use x16 for the BR.
-
-As an example when optimising for size:
-a
- BLR x0
-instruction would get transformed to something like
- BL __call_indirect_x0
-where __call_indirect_x0 labels a thunk that contains
-__call_indirect_x0:
- MOV X16, X0
- BR X16
- <speculation barrier>
-
-The first version of this patch used local symbols specific to a
-compilation unit to try and avoid relocations.
-This was mistaken since functions coming from the same compilation unit
-can still be in different sections, and the assembler will insert
-relocations at jumps between sections.
-
-On any relocation the linker is permitted to emit a veneer to handle
-jumps between symbols that are very far apart. The registers x16 and
-x17 may be clobbered by these veneers.
-Hence the function stubs cannot rely on the values of x16 and x17 being
-the same as just before the function stub is called.
-
-Similar can be said for the hot/cold partitioning of single functions,
-so function-local stubs have the same restriction.
-
-This updated version of the patch never emits function stubs for x16 and
-x17, and instead forces other registers to be used.
-
-Given the above, there is now no benefit to local symbols (since they
-are not enough to avoid dealing with linker intricacies). This patch
-now uses global symbols with hidden visibility each stored in their own
-COMDAT section. This means stubs can be shared between compilation
-units while still avoiding the PLT indirection.
-
-This patch also removes the `__call_indirect_x30` stub (and
-function-local equivalent) which would simply jump back to the original
-location.
-
-The function-local stubs are emitted to the assembly output file in one
-chunk, which means we need not add the speculation barrier directly
-after each one.
-This is because we know for certain that the instructions directly after
-the BR in all but the last function stub will be from another one of
-these stubs and hence will not contain a speculation gadget.
-Instead we add a speculation barrier at the end of the sequence of
-stubs.
-
-The global stubs are emitted in COMDAT/.linkonce sections by
-themselves so that the linker can remove duplicates from multiple object
-files. This means they are not emitted in one chunk, and each one must
-include the speculation barrier.
-
-Another difference is that since the global stubs are shared across
-compilation units we do not know that all functions will be targeting an
-architecture supporting the SB instruction.
-Rather than provide multiple stubs for each architecture, we provide a
-stub that will work for all architectures -- using the DSB+ISB barrier.
-
-This mitigation does not apply for BLR instructions in the following
-places:
-- Some accesses to thread-local variables use a code sequence with a BLR
- instruction. This code sequence is part of the binary interface between
- compiler and linker. If this BLR instruction needs to be mitigated, it'd
- probably be best to do so in the linker. It seems that the code sequence
- for thread-local variable access is unlikely to lead to a Spectre Revalation
- Gadget.
-- PLT stubs are produced by the linker and each contain a BLR instruction.
- It seems that at most only after the last PLT stub a Spectre Revalation
- Gadget might appear.
-
-Testing:
- Bootstrap and regtest on AArch64
- (with BOOT_CFLAGS="-mharden-sls=retbr,blr")
- Used a temporary hack(1) in gcc-dg.exp to use these options on every
- test in the testsuite, a slight modification to emit the speculation
- barrier after every function stub, and a script to check that the
- output never emitted a BLR, or unmitigated BR or RET instruction.
- Similar on an aarch64-none-elf cross-compiler.
-
-1) Temporary hack emitted a speculation barrier at the end of every stub
-function, and used a script to ensure that:
- a) Every RET or BR is immediately followed by a speculation barrier.
- b) No BLR instruction is emitted by compiler.
-
-(cherry picked from 96b7f495f9269d5448822e4fc28882edb35a58d7)
-
-gcc/ChangeLog:
-
- * config/aarch64/aarch64-protos.h (aarch64_indirect_call_asm):
- New declaration.
- * config/aarch64/aarch64.c (aarch64_regno_regclass): Handle new
- stub registers class.
- (aarch64_class_max_nregs): Likewise.
- (aarch64_register_move_cost): Likewise.
- (aarch64_sls_shared_thunks): Global array to store stub labels.
- (aarch64_sls_emit_function_stub): New.
- (aarch64_create_blr_label): New.
- (aarch64_sls_emit_blr_function_thunks): New.
- (aarch64_sls_emit_shared_blr_thunks): New.
- (aarch64_asm_file_end): New.
- (aarch64_indirect_call_asm): New.
- (TARGET_ASM_FILE_END): Use aarch64_asm_file_end.
- (TARGET_ASM_FUNCTION_EPILOGUE): Use
- aarch64_sls_emit_blr_function_thunks.
- * config/aarch64/aarch64.h (STB_REGNUM_P): New.
- (enum reg_class): Add STUB_REGS class.
- (machine_function): Introduce `call_via` array for
- function-local stub labels.
- * config/aarch64/aarch64.md (*call_insn, *call_value_insn): Use
- aarch64_indirect_call_asm to emit code when hardening BLR
- instructions.
- * config/aarch64/constraints.md (Ucr): New constraint
- representing registers for indirect calls. Is GENERAL_REGS
- usually, and STUB_REGS when hardening BLR instruction against
- SLS.
- * config/aarch64/predicates.md (aarch64_general_reg): STUB_REGS class
- is also a general register.
-
-gcc/testsuite/ChangeLog:
-
- * gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c: New test.
- * gcc.target/aarch64/sls-mitigation/sls-miti-blr.c: New test.
----
- gcc/config/aarch64/aarch64-protos.h | 1 +
- gcc/config/aarch64/aarch64.c | 225 +++++++++++++++++-
- gcc/config/aarch64/aarch64.h | 15 ++
- gcc/config/aarch64/aarch64.md | 11 +-
- gcc/config/aarch64/constraints.md | 9 +
- gcc/config/aarch64/predicates.md | 3 +-
- .../aarch64/sls-mitigation/sls-miti-blr-bti.c | 40 ++++
- .../aarch64/sls-mitigation/sls-miti-blr.c | 33 +++
- 8 files changed, 328 insertions(+), 9 deletions(-)
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c
- create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c
-
-diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
-index 885eae893..2676e43ae 100644
---- a/gcc/config/aarch64/aarch64-protos.h
-+++ b/gcc/config/aarch64/aarch64-protos.h
-@@ -645,6 +645,7 @@ poly_uint64 aarch64_regmode_natural_size (machine_mode);
- bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);
-
- const char *aarch64_sls_barrier (int);
-+const char *aarch64_indirect_call_asm (rtx);
- extern bool aarch64_harden_sls_retbr_p (void);
- extern bool aarch64_harden_sls_blr_p (void);
-
-diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
-index dff61105c..bc6c02c3a 100644
---- a/gcc/config/aarch64/aarch64.c
-+++ b/gcc/config/aarch64/aarch64.c
-@@ -8190,6 +8190,9 @@ aarch64_label_mentioned_p (rtx x)
- enum reg_class
- aarch64_regno_regclass (unsigned regno)
- {
-+ if (STUB_REGNUM_P (regno))
-+ return STUB_REGS;
-+
- if (GP_REGNUM_P (regno))
- return GENERAL_REGS;
-
-@@ -8499,6 +8502,7 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
- unsigned int nregs;
- switch (regclass)
- {
-+ case STUB_REGS:
- case TAILCALL_ADDR_REGS:
- case POINTER_REGS:
- case GENERAL_REGS:
-@@ -10693,10 +10697,12 @@ aarch64_register_move_cost (machine_mode mode,
- = aarch64_tune_params.regmove_cost;
-
- /* Caller save and pointer regs are equivalent to GENERAL_REGS. */
-- if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS)
-+ if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS
-+ || to == STUB_REGS)
- to = GENERAL_REGS;
-
-- if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS)
-+ if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS
-+ || from == STUB_REGS)
- from = GENERAL_REGS;
-
- /* Moving between GPR and stack cost is the same as GP2GP. */
-@@ -19009,6 +19015,215 @@ aarch64_sls_barrier (int mitigation_required)
- : "";
- }
-
-+static GTY (()) tree aarch64_sls_shared_thunks[30];
-+static GTY (()) bool aarch64_sls_shared_thunks_needed = false;
-+const char *indirect_symbol_names[30] = {
-+ "__call_indirect_x0",
-+ "__call_indirect_x1",
-+ "__call_indirect_x2",
-+ "__call_indirect_x3",
-+ "__call_indirect_x4",
-+ "__call_indirect_x5",
-+ "__call_indirect_x6",
-+ "__call_indirect_x7",
-+ "__call_indirect_x8",
-+ "__call_indirect_x9",
-+ "__call_indirect_x10",
-+ "__call_indirect_x11",
-+ "__call_indirect_x12",
-+ "__call_indirect_x13",
-+ "__call_indirect_x14",
-+ "__call_indirect_x15",
-+ "", /* "__call_indirect_x16", */
-+ "", /* "__call_indirect_x17", */
-+ "__call_indirect_x18",
-+ "__call_indirect_x19",
-+ "__call_indirect_x20",
-+ "__call_indirect_x21",
-+ "__call_indirect_x22",
-+ "__call_indirect_x23",
-+ "__call_indirect_x24",
-+ "__call_indirect_x25",
-+ "__call_indirect_x26",
-+ "__call_indirect_x27",
-+ "__call_indirect_x28",
-+ "__call_indirect_x29",
-+};
-+
-+/* Function to create a BLR thunk. This thunk is used to mitigate straight
-+ line speculation. Instead of a simple BLR that can be speculated past,
-+ we emit a BL to this thunk, and this thunk contains a BR to the relevant
-+ register. These thunks have the relevant speculation barriers put after
-+ their indirect branch so that speculation is blocked.
-+
-+ We use such a thunk so the speculation barriers are kept off the
-+ architecturally executed path in order to reduce the performance overhead.
-+
-+ When optimizing for size we use stubs shared by the linked object.
-+ When optimizing for performance we emit stubs for each function in the hope
-+ that the branch predictor can better train on jumps specific for a given
-+ function. */
-+rtx
-+aarch64_sls_create_blr_label (int regnum)
-+{
-+ gcc_assert (STUB_REGNUM_P (regnum));
-+ if (optimize_function_for_size_p (cfun))
-+ {
-+ /* For the thunks shared between different functions in this compilation
-+ unit we use a named symbol -- this is just for users to more easily
-+ understand the generated assembly. */
-+ aarch64_sls_shared_thunks_needed = true;
-+ const char *thunk_name = indirect_symbol_names[regnum];
-+ if (aarch64_sls_shared_thunks[regnum] == NULL)
-+ {
-+ /* Build a decl representing this function stub and record it for
-+ later. We build a decl here so we can use the GCC machinery for
-+ handling sections automatically (through `get_named_section` and
-+ `make_decl_one_only`). That saves us a lot of trouble handling
-+ the specifics of different output file formats. */
-+ tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
-+ get_identifier (thunk_name),
-+ build_function_type_list (void_type_node,
-+ NULL_TREE));
-+ DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
-+ NULL_TREE, void_type_node);
-+ TREE_PUBLIC (decl) = 1;
-+ TREE_STATIC (decl) = 1;
-+ DECL_IGNORED_P (decl) = 1;
-+ DECL_ARTIFICIAL (decl) = 1;
-+ make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
-+ resolve_unique_section (decl, 0, false);
-+ aarch64_sls_shared_thunks[regnum] = decl;
-+ }
-+
-+ return gen_rtx_SYMBOL_REF (Pmode, thunk_name);
-+ }
-+
-+ if (cfun->machine->call_via[regnum] == NULL)
-+ cfun->machine->call_via[regnum]
-+ = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
-+ return cfun->machine->call_via[regnum];
-+}
-+
-+/* Helper function for aarch64_sls_emit_blr_function_thunks and
-+ aarch64_sls_emit_shared_blr_thunks below. */
-+static void
-+aarch64_sls_emit_function_stub (FILE *out_file, int regnum)
-+{
-+ /* Save in x16 and branch to that function so this transformation does
-+ not prevent jumping to `BTI c` instructions. */
-+ asm_fprintf (out_file, "\tmov\tx16, x%d\n", regnum);
-+ asm_fprintf (out_file, "\tbr\tx16\n");
-+}
-+
-+/* Emit all BLR stubs for this particular function.
-+ Here we emit all the BLR stubs needed for the current function. Since we
-+ emit these stubs in a consecutive block we know there will be no speculation
-+ gadgets between each stub, and hence we only emit a speculation barrier at
-+ the end of the stub sequences.
-+
-+ This is called in the TARGET_ASM_FUNCTION_EPILOGUE hook. */
-+void
-+aarch64_sls_emit_blr_function_thunks (FILE *out_file)
-+{
-+ if (! aarch64_harden_sls_blr_p ())
-+ return;
-+
-+ bool any_functions_emitted = false;
-+ /* We must save and restore the current function section since this assembly
-+ is emitted at the end of the function. This means it can be emitted *just
-+ after* the cold section of a function. That cold part would be emitted in
-+ a different section. That switch would trigger a `.cfi_endproc` directive
-+ to be emitted in the original section and a `.cfi_startproc` directive to
-+ be emitted in the new section. Switching to the original section without
-+ restoring would mean that the `.cfi_endproc` emitted as a function ends
-+ would happen in a different section -- leaving an unmatched
-+ `.cfi_startproc` in the cold text section and an unmatched `.cfi_endproc`
-+ in the standard text section. */
-+ section *save_text_section = in_section;
-+ switch_to_section (function_section (current_function_decl));
-+ for (int regnum = 0; regnum < 30; ++regnum)
-+ {
-+ rtx specu_label = cfun->machine->call_via[regnum];
-+ if (specu_label == NULL)
-+ continue;
-+
-+ targetm.asm_out.print_operand (out_file, specu_label, 0);
-+ asm_fprintf (out_file, ":\n");
-+ aarch64_sls_emit_function_stub (out_file, regnum);
-+ any_functions_emitted = true;
-+ }
-+ if (any_functions_emitted)
-+ /* Can use the SB if needs be here, since this stub will only be used
-+ by the current function, and hence for the current target. */
-+ asm_fprintf (out_file, "\t%s\n", aarch64_sls_barrier (true));
-+ switch_to_section (save_text_section);
-+}
-+
-+/* Emit shared BLR stubs for the current compilation unit.
-+ Over the course of compiling this unit we may have converted some BLR
-+ instructions to a BL to a shared stub function. This is where we emit those
-+ stub functions.
-+ This function is for the stubs shared between different functions in this
-+ compilation unit. We share when optimizing for size instead of speed.
-+
-+ This function is called through the TARGET_ASM_FILE_END hook. */
-+void
-+aarch64_sls_emit_shared_blr_thunks (FILE *out_file)
-+{
-+ if (! aarch64_sls_shared_thunks_needed)
-+ return;
-+
-+ for (int regnum = 0; regnum < 30; ++regnum)
-+ {
-+ tree decl = aarch64_sls_shared_thunks[regnum];
-+ if (!decl)
-+ continue;
-+
-+ const char *name = indirect_symbol_names[regnum];
-+ switch_to_section (get_named_section (decl, NULL, 0));
-+ ASM_OUTPUT_ALIGN (out_file, 2);
-+ targetm.asm_out.globalize_label (out_file, name);
-+ /* Only emits if the compiler is configured for an assembler that can
-+ handle visibility directives. */
-+ targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
-+ ASM_OUTPUT_TYPE_DIRECTIVE (out_file, name, "function");
-+ ASM_OUTPUT_LABEL (out_file, name);
-+ aarch64_sls_emit_function_stub (out_file, regnum);
-+ /* Use the most conservative target to ensure it can always be used by any
-+ function in the translation unit. */
-+ asm_fprintf (out_file, "\tdsb\tsy\n\tisb\n");
-+ ASM_DECLARE_FUNCTION_SIZE (out_file, name, decl);
-+ }
-+}
-+
-+/* Implement TARGET_ASM_FILE_END. */
-+void
-+aarch64_asm_file_end ()
-+{
-+ aarch64_sls_emit_shared_blr_thunks (asm_out_file);
-+ /* Since this function will be called for the ASM_FILE_END hook, we ensure
-+ that what would be called otherwise (e.g. `file_end_indicate_exec_stack`
-+ for FreeBSD) still gets called. */
-+#ifdef TARGET_ASM_FILE_END
-+ TARGET_ASM_FILE_END ();
-+#endif
-+}
-+
-+const char *
-+aarch64_indirect_call_asm (rtx addr)
-+{
-+ gcc_assert (REG_P (addr));
-+ if (aarch64_harden_sls_blr_p ())
-+ {
-+ rtx stub_label = aarch64_sls_create_blr_label (REGNO (addr));
-+ output_asm_insn ("bl\t%0", &stub_label);
-+ }
-+ else
-+ output_asm_insn ("blr\t%0", &addr);
-+ return "";
-+}
-+
- /* Target-specific selftests. */
-
- #if CHECKING_P
-@@ -19529,6 +19744,12 @@ aarch64_libgcc_floating_mode_supported_p
- #define TARGET_RUN_TARGET_SELFTESTS selftest::aarch64_run_selftests
- #endif /* #if CHECKING_P */
-
-+#undef TARGET_ASM_FILE_END
-+#define TARGET_ASM_FILE_END aarch64_asm_file_end
-+
-+#undef TARGET_ASM_FUNCTION_EPILOGUE
-+#define TARGET_ASM_FUNCTION_EPILOGUE aarch64_sls_emit_blr_function_thunks
-+
- struct gcc_target targetm = TARGET_INITIALIZER;
-
- #include "gt-aarch64.h"
-diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
-index 72ddc6fd9..60682a100 100644
---- a/gcc/config/aarch64/aarch64.h
-+++ b/gcc/config/aarch64/aarch64.h
-@@ -540,6 +540,16 @@ extern unsigned aarch64_architecture_version;
- #define GP_REGNUM_P(REGNO) \
- (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))
-
-+/* Registers known to be preserved over a BL instruction. This consists of the
-+ GENERAL_REGS without x16, x17, and x30. The x30 register is changed by the
-+ BL instruction itself, while the x16 and x17 registers may be used by
-+ veneers which can be inserted by the linker. */
-+#define STUB_REGNUM_P(REGNO) \
-+ (GP_REGNUM_P (REGNO) \
-+ && (REGNO) != R16_REGNUM \
-+ && (REGNO) != R17_REGNUM \
-+ && (REGNO) != R30_REGNUM) \
-+
- #define FP_REGNUM_P(REGNO) \
- (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))
-
-@@ -561,6 +571,7 @@ enum reg_class
- {
- NO_REGS,
- TAILCALL_ADDR_REGS,
-+ STUB_REGS,
- GENERAL_REGS,
- STACK_REG,
- POINTER_REGS,
-@@ -580,6 +591,7 @@ enum reg_class
- { \
- "NO_REGS", \
- "TAILCALL_ADDR_REGS", \
-+ "STUB_REGS", \
- "GENERAL_REGS", \
- "STACK_REG", \
- "POINTER_REGS", \
-@@ -596,6 +608,7 @@ enum reg_class
- { \
- { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
- { 0x00030000, 0x00000000, 0x00000000 }, /* TAILCALL_ADDR_REGS */\
-+ { 0x3ffcffff, 0x00000000, 0x00000000 }, /* STUB_REGS */ \
- { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \
- { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \
- { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \
-@@ -735,6 +748,8 @@ typedef struct GTY (()) machine_function
- struct aarch64_frame frame;
- /* One entry for each hard register. */
- bool reg_is_wrapped_separately[LAST_SAVED_REGNUM];
-+ /* One entry for each general purpose register. */
-+ rtx call_via[SP_REGNUM];
- bool label_is_assembled;
- } machine_function;
- #endif
-diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 494aee964..ed8cf8ece 100644
---- a/gcc/config/aarch64/aarch64.md
-+++ b/gcc/config/aarch64/aarch64.md
-@@ -908,15 +908,14 @@
- )
-
- (define_insn "*call_insn"
-- [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "r, Usf"))
-+ [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "Ucr, Usf"))
- (match_operand 1 "" ""))
- (clobber (reg:DI LR_REGNUM))]
- ""
- "@
-- blr\\t%0
-+ * return aarch64_indirect_call_asm (operands[0]);
- bl\\t%c0"
-- [(set_attr "type" "call, call")]
--)
-+ [(set_attr "type" "call, call")])
-
- (define_expand "call_value"
- [(parallel [(set (match_operand 0 "" "")
-@@ -934,12 +933,12 @@
-
- (define_insn "*call_value_insn"
- [(set (match_operand 0 "" "")
-- (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "r, Usf"))
-+ (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "Ucr, Usf"))
- (match_operand 2 "" "")))
- (clobber (reg:DI LR_REGNUM))]
- ""
- "@
-- blr\\t%1
-+ * return aarch64_indirect_call_asm (operands[1]);
- bl\\t%c1"
- [(set_attr "type" "call, call")]
- )
-diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
-index 21f9549e6..7756dbe83 100644
---- a/gcc/config/aarch64/constraints.md
-+++ b/gcc/config/aarch64/constraints.md
-@@ -24,6 +24,15 @@
- (define_register_constraint "Ucs" "TAILCALL_ADDR_REGS"
- "@internal Registers suitable for an indirect tail call")
-
-+(define_register_constraint "Ucr"
-+ "aarch64_harden_sls_blr_p () ? STUB_REGS : GENERAL_REGS"
-+ "@internal Registers to be used for an indirect call.
-+ This is usually the general registers, but when we are hardening against
-+ Straight Line Speculation we disallow x16, x17, and x30 so we can use
-+ indirection stubs. These indirection stubs cannot use the above registers
-+ since they will be reached by a BL that may have to go through a linker
-+ veneer.")
-+
- (define_register_constraint "w" "FP_REGS"
- "Floating point and SIMD vector registers.")
-
-diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
-index 8e1b78421..4250aecb3 100644
---- a/gcc/config/aarch64/predicates.md
-+++ b/gcc/config/aarch64/predicates.md
-@@ -32,7 +32,8 @@
-
- (define_predicate "aarch64_general_reg"
- (and (match_operand 0 "register_operand")
-- (match_test "REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS")))
-+ (match_test "REGNO_REG_CLASS (REGNO (op)) == STUB_REGS
-+ || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS")))
-
- ;; Return true if OP a (const_int 0) operand.
- (define_predicate "const0_operand"
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c
-new file mode 100644
-index 000000000..b1fb754c7
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c
-@@ -0,0 +1,40 @@
-+/* { dg-do compile } */
-+/* { dg-additional-options "-mharden-sls=blr -mbranch-protection=bti" } */
-+/*
-+ Ensure that the SLS hardening of BLR leaves no BLR instructions.
-+ Here we also check that there are no BR instructions with anything except an
-+ x16 or x17 register. This is because a `BTI c` instruction can be branched
-+ to using a BLR instruction using any register, but can only be branched to
-+ with a BR using an x16 or x17 register.
-+ */
-+typedef int (foo) (int, int);
-+typedef void (bar) (int, int);
-+struct sls_testclass {
-+ foo *x;
-+ bar *y;
-+ int left;
-+ int right;
-+};
-+
-+/* We test both RTL patterns for a call which returns a value and a call which
-+ does not. */
-+int blr_call_value (struct sls_testclass x)
-+{
-+ int retval = x.x(x.left, x.right);
-+ if (retval % 10)
-+ return 100;
-+ return 9;
-+}
-+
-+int blr_call (struct sls_testclass x)
-+{
-+ x.y(x.left, x.right);
-+ if (x.left % 10)
-+ return 100;
-+ return 9;
-+}
-+
-+/* { dg-final { scan-assembler-not {\tblr\t} } } */
-+/* { dg-final { scan-assembler-not {\tbr\tx(?!16|17)} } } */
-+/* { dg-final { scan-assembler {\tbr\tx(16|17)} } } */
-+
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c
-new file mode 100644
-index 000000000..88baffffe
---- /dev/null
-+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c
-@@ -0,0 +1,33 @@
-+/* { dg-additional-options "-mharden-sls=blr -save-temps" } */
-+/* Ensure that the SLS hardening of BLR leaves no BLR instructions.
-+ We only test that all BLR instructions have been removed, not that the
-+ resulting code makes sense. */
-+typedef int (foo) (int, int);
-+typedef void (bar) (int, int);
-+struct sls_testclass {
-+ foo *x;
-+ bar *y;
-+ int left;
-+ int right;
-+};
-+
-+/* We test both RTL patterns for a call which returns a value and a call which
-+ does not. */
-+int blr_call_value (struct sls_testclass x)
-+{
-+ int retval = x.x(x.left, x.right);
-+ if (retval % 10)
-+ return 100;
-+ return 9;
-+}
-+
-+int blr_call (struct sls_testclass x)
-+{
-+ x.y(x.left, x.right);
-+ if (x.left % 10)
-+ return 100;
-+ return 9;
-+}
-+
-+/* { dg-final { scan-assembler-not {\tblr\t} } } */
-+/* { dg-final { scan-assembler {\tbr\tx[0-9][0-9]?} } } */
---
-2.25.1
-
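The BLR mitigation removed above (0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch) routes every indirect call through a small stub instead of issuing a bare BLR: the STUB_REGS class and the Ucr constraint keep x16, x17 and x30 out of the call address, so the stub's "mov x16, xN; br x16" sequence and any linker veneer cannot clobber it. A rough sketch of the transformation in C, with the relevant assembly as comments; the stub symbol comes from indirect_symbol_names[], whose definition is not part of the quoted hunks, so the "__call_indirect_x<N>" spelling below is an assumption.

/* Illustrative only: the effect of -mharden-sls=blr on an indirect call.
   The register number and the exact stub symbol depend on register
   allocation and on indirect_symbol_names[] (not shown above).  */
typedef int (*callback_t) (int);

int
invoke (callback_t cb, int arg)
{
  /* Without hardening:       blr  x<N>
     With -mharden-sls=blr:   bl   __call_indirect_x<N>    (assumed name)

     where the shared stub emitted by aarch64_sls_emit_shared_blr_thunks is:

         __call_indirect_x<N>:
                 mov  x16, x<N>
                 br   x16
                 dsb  sy
                 isb                                                       */
  return cb (arg);
}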
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0040-fix-missing-dependencies-for-selftests.patch b/meta/recipes-devtools/gcc/gcc-9.3/0040-fix-missing-dependencies-for-selftests.patch
deleted file mode 100644
index c8960c6098..0000000000
--- a/meta/recipes-devtools/gcc/gcc-9.3/0040-fix-missing-dependencies-for-selftests.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From b19d8aac15649f31a7588b2634411a1922906ea8 Mon Sep 17 00:00:00 2001
-From: Romain Naour <romain.naour@gmail.com>
-Date: Wed, 3 Jun 2020 12:30:57 -0600
-Subject: [PATCH] Fix missing dependencies for selftests which occasionally
- causes failed builds.
-
-gcc/
-
- * Makefile.in (SELFTEST_DEPS): Move before including language makefile
- fragments.
-
-Upstream-Status: Backport [https://gcc.gnu.org/git/?p=gcc.git;a=commitdiff;h=b19d8aac15649f31a7588b2634411a1922906ea8]
-Signed-off-by:Steve Sakoman <steve@sakoman.com>
-
----
- gcc/Makefile.in | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
-diff --git a/gcc/Makefile.in b/gcc/Makefile.in
-index aab1dbba57b..be11311b60d 100644
---- a/gcc/Makefile.in
-+++ b/gcc/Makefile.in
-@@ -1735,6 +1735,10 @@ $(FULL_DRIVER_NAME): ./xgcc$(exeext)
- $(LN_S) $< $@
-
- #
-+# SELFTEST_DEPS need to be set before including language makefile fragments.
-+# Otherwise $(SELFTEST_DEPS) is empty when used from <LANG>/Make-lang.in.
-+SELFTEST_DEPS = $(GCC_PASSES) stmp-int-hdrs $(srcdir)/testsuite/selftests
-+
- # Language makefile fragments.
-
- # The following targets define the interface between us and the languages.
-@@ -2010,8 +2014,6 @@ DEVNULL=$(if $(findstring mingw,$(build)),nul,/dev/null)
- SELFTEST_FLAGS = -nostdinc $(DEVNULL) -S -o $(DEVNULL) \
- -fself-test=$(srcdir)/testsuite/selftests
-
--SELFTEST_DEPS = $(GCC_PASSES) stmp-int-hdrs $(srcdir)/testsuite/selftests
--
- # Run the selftests during the build once we have a driver and the frontend,
- # so that self-test failures are caught as early as possible.
- # Use "s-selftest-FE" to ensure that we only run the selftests if the
---
-2.27.0
-
diff --git a/meta/recipes-devtools/gcc/gcc-9.3.inc b/meta/recipes-devtools/gcc/gcc-9.5.inc
index c171f673e9..9bb41bbe24 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3.inc
+++ b/meta/recipes-devtools/gcc/gcc-9.5.inc
@@ -2,13 +2,13 @@ require gcc-common.inc
# Third digit in PV should be incremented after a minor release
-PV = "9.3.0"
+PV = "9.5.0"
# BINV should be incremented to a revision after a minor gcc release
-BINV = "9.3.0"
+BINV = "9.5.0"
-FILESEXTRAPATHS =. "${FILE_DIRNAME}/gcc-9.3:${FILE_DIRNAME}/gcc-9.3/backport:"
+FILESEXTRAPATHS =. "${FILE_DIRNAME}/gcc-9.5:${FILE_DIRNAME}/gcc-9.5/backport:"
DEPENDS =+ "mpfr gmp libmpc zlib flex-native"
NATIVEDEPS = "mpfr-native gmp-native libmpc-native zlib-native flex-native"
@@ -69,17 +69,14 @@ SRC_URI = "\
file://0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch \
file://0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch \
file://0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch \
- file://0040-fix-missing-dependencies-for-selftests.patch \
- file://0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch \
- file://0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch \
- file://0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch \
- file://0001-Backport-fix-for-PR-tree-optimization-97236-fix-bad-.patch \
+ file://0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch \
+ file://CVE-2023-4039.patch \
"
S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${PV}"
-SRC_URI[sha256sum] = "71e197867611f6054aa1119b13a0c0abac12834765fe2d81f35ac57f84f742d1"
+SRC_URI[sha256sum] = "27769f64ef1d4cd5e2be8682c0c93f9887983e6cfd1a927ce5a0a2915a95cf8f"
# For dev release snapshotting
#S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/official-gcc-${RELEASE}"
-#B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
+B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
# Language Overrides
FORTRAN = ""
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch b/meta/recipes-devtools/gcc/gcc-9.5/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch
index 0d9222df17..0d9222df17 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0002-gcc-poison-system-directories.patch b/meta/recipes-devtools/gcc/gcc-9.5/0002-gcc-poison-system-directories.patch
index f427ee67c1..f427ee67c1 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0002-gcc-poison-system-directories.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0002-gcc-poison-system-directories.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.5/0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch b/meta/recipes-devtools/gcc/gcc-9.5/0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch
new file mode 100644
index 0000000000..506064bfc2
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0002-libstdc-Fix-inconsistent-noexcept-specific-for-valar.patch
@@ -0,0 +1,44 @@
+From 60d966708d7cf105dccf128d2b7a38b0b2580a1a Mon Sep 17 00:00:00 2001
+From: Jonathan Wakely <jwakely@redhat.com>
+Date: Fri, 5 Nov 2021 21:42:20 +0000
+Subject: [PATCH] libstdc++: Fix inconsistent noexcept-specific for valarray
+ begin/end
+
+These declarations should be noexcept after I added it to the
+definitions in <valarray>.
+
+libstdc++-v3/ChangeLog:
+
+ * include/bits/range_access.h (begin(valarray), end(valarray)):
+ Add noexcept.
+
+(cherry picked from commit 2b2d97fc545635a0f6aa9c9ee3b017394bc494bf)
+
+Upstream-Status: Backport [https://github.com/hkaelber/gcc/commit/2b2d97fc545635a0f6aa9c9ee3b017394bc494bf]
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+
+---
+ libstdc++-v3/include/bits/range_access.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/libstdc++-v3/include/bits/range_access.h b/libstdc++-v3/include/bits/range_access.h
+index 3d99ea92027..4736e75fda1 100644
+--- a/libstdc++-v3/include/bits/range_access.h
++++ b/libstdc++-v3/include/bits/range_access.h
+@@ -101,10 +101,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ template<typename _Tp> class valarray;
+ // These overloads must be declared for cbegin and cend to use them.
+- template<typename _Tp> _Tp* begin(valarray<_Tp>&);
+- template<typename _Tp> const _Tp* begin(const valarray<_Tp>&);
+- template<typename _Tp> _Tp* end(valarray<_Tp>&);
+- template<typename _Tp> const _Tp* end(const valarray<_Tp>&);
++ template<typename _Tp> _Tp* begin(valarray<_Tp>&) noexcept;
++ template<typename _Tp> const _Tp* begin(const valarray<_Tp>&) noexcept;
++ template<typename _Tp> _Tp* end(valarray<_Tp>&) noexcept;
++ template<typename _Tp> const _Tp* end(const valarray<_Tp>&) noexcept;
+
+ /**
+ * @brief Return an iterator pointing to the first element of
+--
+2.25.1
\ No newline at end of file
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch b/meta/recipes-devtools/gcc/gcc-9.5/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch
index 23ec5bce03..23ec5bce03 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0004-64-bit-multilib-hack.patch b/meta/recipes-devtools/gcc/gcc-9.5/0004-64-bit-multilib-hack.patch
index 17ec8986c1..17ec8986c1 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0004-64-bit-multilib-hack.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0004-64-bit-multilib-hack.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0005-optional-libstdc.patch b/meta/recipes-devtools/gcc/gcc-9.5/0005-optional-libstdc.patch
index 3c28aeac63..3c28aeac63 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0005-optional-libstdc.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0005-optional-libstdc.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0006-COLLECT_GCC_OPTIONS.patch b/meta/recipes-devtools/gcc/gcc-9.5/0006-COLLECT_GCC_OPTIONS.patch
index 906f3a7317..906f3a7317 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0006-COLLECT_GCC_OPTIONS.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0006-COLLECT_GCC_OPTIONS.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch b/meta/recipes-devtools/gcc/gcc-9.5/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch
index 68a876cb95..68a876cb95 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0008-fortran-cross-compile-hack.patch b/meta/recipes-devtools/gcc/gcc-9.5/0008-fortran-cross-compile-hack.patch
index 6acd2b0cf9..6acd2b0cf9 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0008-fortran-cross-compile-hack.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0008-fortran-cross-compile-hack.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0009-cpp-honor-sysroot.patch b/meta/recipes-devtools/gcc/gcc-9.5/0009-cpp-honor-sysroot.patch
index 5a9e527606..5a9e527606 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0009-cpp-honor-sysroot.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0009-cpp-honor-sysroot.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0010-MIPS64-Default-to-N64-ABI.patch b/meta/recipes-devtools/gcc/gcc-9.5/0010-MIPS64-Default-to-N64-ABI.patch
index a8103b951e..a8103b951e 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0010-MIPS64-Default-to-N64-ABI.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0010-MIPS64-Default-to-N64-ABI.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch b/meta/recipes-devtools/gcc/gcc-9.5/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
index d9d563d0f7..d9d563d0f7 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0012-gcc-Fix-argument-list-too-long-error.patch b/meta/recipes-devtools/gcc/gcc-9.5/0012-gcc-Fix-argument-list-too-long-error.patch
index f0b79ee145..f0b79ee145 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0012-gcc-Fix-argument-list-too-long-error.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0012-gcc-Fix-argument-list-too-long-error.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0013-Disable-sdt.patch b/meta/recipes-devtools/gcc/gcc-9.5/0013-Disable-sdt.patch
index 455858354f..455858354f 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0013-Disable-sdt.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0013-Disable-sdt.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0014-libtool.patch b/meta/recipes-devtools/gcc/gcc-9.5/0014-libtool.patch
index 2953859238..2953859238 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0014-libtool.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0014-libtool.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch b/meta/recipes-devtools/gcc/gcc-9.5/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
index d4445244e2..d4445244e2 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch b/meta/recipes-devtools/gcc/gcc-9.5/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch
index 6f0833ccda..6f0833ccda 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch b/meta/recipes-devtools/gcc/gcc-9.5/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch
index 96da013bf2..96da013bf2 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0018-export-CPP.patch b/meta/recipes-devtools/gcc/gcc-9.5/0018-export-CPP.patch
index 2385099c25..2385099c25 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0018-export-CPP.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0018-export-CPP.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0019-Ensure-target-gcc-headers-can-be-included.patch b/meta/recipes-devtools/gcc/gcc-9.5/0019-Ensure-target-gcc-headers-can-be-included.patch
index e0129d1f96..e0129d1f96 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0019-Ensure-target-gcc-headers-can-be-included.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0019-Ensure-target-gcc-headers-can-be-included.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch b/meta/recipes-devtools/gcc/gcc-9.5/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch
index 1d2182140f..1d2182140f 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0020-gcc-4.8-won-t-build-with-disable-dependency-tracking.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch b/meta/recipes-devtools/gcc/gcc-9.5/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch
index e363c7d445..e363c7d445 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0021-Don-t-search-host-directory-during-relink-if-inst_pr.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch b/meta/recipes-devtools/gcc/gcc-9.5/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch
index 846c0de5e8..846c0de5e8 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0022-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0023-aarch64-Add-support-for-musl-ldso.patch b/meta/recipes-devtools/gcc/gcc-9.5/0023-aarch64-Add-support-for-musl-ldso.patch
index 102d6fc742..102d6fc742 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0023-aarch64-Add-support-for-musl-ldso.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0023-aarch64-Add-support-for-musl-ldso.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch b/meta/recipes-devtools/gcc/gcc-9.5/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch
index 443e0a2ca6..443e0a2ca6 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0024-libcc1-fix-libcc1-s-install-path-and-rpath.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0025-handle-sysroot-support-for-nativesdk-gcc.patch b/meta/recipes-devtools/gcc/gcc-9.5/0025-handle-sysroot-support-for-nativesdk-gcc.patch
index 59ac97eaed..59ac97eaed 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0025-handle-sysroot-support-for-nativesdk-gcc.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0025-handle-sysroot-support-for-nativesdk-gcc.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch b/meta/recipes-devtools/gcc/gcc-9.5/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch
index abfa7516da..abfa7516da 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0026-Search-target-sysroot-gcc-version-specific-dirs-with.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0027-Fix-various-_FOR_BUILD-and-related-variables.patch b/meta/recipes-devtools/gcc/gcc-9.5/0027-Fix-various-_FOR_BUILD-and-related-variables.patch
index ae8acc7f13..ae8acc7f13 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0027-Fix-various-_FOR_BUILD-and-related-variables.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0027-Fix-various-_FOR_BUILD-and-related-variables.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch b/meta/recipes-devtools/gcc/gcc-9.5/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch
index 52a5d97aef..52a5d97aef 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0028-nios2-Define-MUSL_DYNAMIC_LINKER.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch b/meta/recipes-devtools/gcc/gcc-9.5/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch
index bfa7e19dd0..bfa7e19dd0 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0029-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0030-ldbl128-config.patch b/meta/recipes-devtools/gcc/gcc-9.5/0030-ldbl128-config.patch
index f8e8c07f62..f8e8c07f62 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0030-ldbl128-config.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0030-ldbl128-config.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch b/meta/recipes-devtools/gcc/gcc-9.5/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch
index 60a29fc94d..60a29fc94d 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0031-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch b/meta/recipes-devtools/gcc/gcc-9.5/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch
index 6f048dab82..6f048dab82 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0032-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0033-sync-gcc-stddef.h-with-musl.patch b/meta/recipes-devtools/gcc/gcc-9.5/0033-sync-gcc-stddef.h-with-musl.patch
index f080b0596f..f080b0596f 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0033-sync-gcc-stddef.h-with-musl.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0033-sync-gcc-stddef.h-with-musl.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0034-fix-segmentation-fault-in-precompiled-header-generat.patch b/meta/recipes-devtools/gcc/gcc-9.5/0034-fix-segmentation-fault-in-precompiled-header-generat.patch
index 3b7ccb3e3d..3b7ccb3e3d 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0034-fix-segmentation-fault-in-precompiled-header-generat.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0034-fix-segmentation-fault-in-precompiled-header-generat.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0035-Fix-for-testsuite-failure.patch b/meta/recipes-devtools/gcc/gcc-9.5/0035-Fix-for-testsuite-failure.patch
index 5e199fbcfd..5e199fbcfd 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0035-Fix-for-testsuite-failure.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0035-Fix-for-testsuite-failure.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0036-Re-introduce-spe-commandline-options.patch b/meta/recipes-devtools/gcc/gcc-9.5/0036-Re-introduce-spe-commandline-options.patch
index 825e070aa3..825e070aa3 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0036-Re-introduce-spe-commandline-options.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0036-Re-introduce-spe-commandline-options.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch b/meta/recipes-devtools/gcc/gcc-9.5/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch
index f268a4eb58..f268a4eb58 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0037-CVE-2019-14250-Check-zero-value-in-simple_object_elf.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch b/meta/recipes-devtools/gcc/gcc-9.5/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch
index a79fc03d15..a79fc03d15 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0038-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.3/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch b/meta/recipes-devtools/gcc/gcc-9.5/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch
index b69114d1e5..b69114d1e5 100644
--- a/meta/recipes-devtools/gcc/gcc-9.3/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch
+++ b/meta/recipes-devtools/gcc/gcc-9.5/0039-process_alt_operands-Don-t-match-user-defined-regs-o.patch
diff --git a/meta/recipes-devtools/gcc/gcc-9.5/CVE-2023-4039.patch b/meta/recipes-devtools/gcc/gcc-9.5/CVE-2023-4039.patch
new file mode 100644
index 0000000000..56d229066f
--- /dev/null
+++ b/meta/recipes-devtools/gcc/gcc-9.5/CVE-2023-4039.patch
@@ -0,0 +1,1506 @@
+From: Richard Sandiford <richard.sandiford@arm.com>
+Subject: [PATCH 00/19] aarch64: Fix -fstack-protector issue
+Date: Tue, 12 Sep 2023 16:25:10 +0100
+
+This series of patches fixes deficiencies in GCC's -fstack-protector
+implementation for AArch64 when using dynamically allocated stack space.
+This is CVE-2023-4039. See:
+
+https://developer.arm.com/Arm%20Security%20Center/GCC%20Stack%20Protector%20Vulnerability%20AArch64
+https://github.com/metaredteam/external-disclosures/security/advisories/GHSA-x7ch-h5rf-w2mf
+
+for more details.
+
+The fix is to put the saved registers above the locals area when
+-fstack-protector is used.
+
+The series also fixes a stack-clash problem that I found while working
+on the CVE. In unpatched sources, the stack-clash problem would only
+trigger for unrealistic numbers of arguments (8K 64-bit arguments, or an
+equivalent). But it would be a more significant issue with the new
+-fstack-protector frame layout. It's therefore important that both
+problems are fixed together.
+
+Some reorganisation of the code seemed necessary to fix the problems in a
+cleanish way. The series is therefore quite long, but only a handful of
+patches should have any effect on code generation.
+
+See the individual patches for a detailed description.
+
+Tested on aarch64-linux-gnu. Pushed to trunk and to all active branches.
+I've also pushed backports to GCC 7+ to vendors/ARM/heads/CVE-2023-4039.
+
+CVE: CVE-2023-4039
+Upstream-Status: Submitted
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+
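Before the individual patches, a minimal sketch of the kind of function the cover letter is describing: a dynamically sized local under -fstack-protector. The function name, sizes and the overflowing write are invented for illustration; the only relevant property is that the buffer lives in stack space whose size is not known at compile time.

/* Invented example: dynamically allocated stack space with the stack
   protector enabled.  Compile with something like
       gcc -O2 -fstack-protector-strong reply.c
   The overflow is deliberate and exists only to illustrate the pattern.  */
#include <string.h>

void
format_reply (const char *payload, unsigned long payload_len)
{
  char buf[payload_len];                    /* run-time sized stack buffer */
  memcpy (buf, payload, payload_len + 64);  /* out-of-bounds write */
}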
+From 78ebdb7b12d5e258b9811bab715734454268fd0c Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Fri, 16 Jun 2023 17:00:51 +0100
+Subject: [PATCH 01/10] aarch64: Explicitly handle frames with no saved
+ registers
+
+If a frame has no saved registers, it can be allocated in one go.
+There is no need to treat the areas below and above the saved
+registers as separate.
+
+And if we allocate the frame in one go, it should be allocated
+as the initial_adjust rather than the final_adjust. This allows the
+frame size to grow to guard_size - guard_used_by_caller before a stack
+probe is needed. (A frame with no register saves is necessarily a
+leaf frame.)
+
+This is a no-op as things stand, since a leaf function will have
+no outgoing arguments, and so all the frame will be above where
+the saved registers normally go.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Explicitly
+ allocate the frame in one go if there are no saved registers.
+---
+ gcc/config/aarch64/aarch64.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index a35dceab9fc..e9dad682738 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4771,9 +4771,11 @@ aarch64_layout_frame (void)
+ max_push_offset = 256;
+
+ HOST_WIDE_INT const_size, const_fp_offset;
+- if (cfun->machine->frame.frame_size.is_constant (&const_size)
+- && const_size < max_push_offset
+- && known_eq (crtl->outgoing_args_size, 0))
++ if (cfun->machine->frame.saved_regs_size == 0)
++ cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
++ else if (cfun->machine->frame.frame_size.is_constant (&const_size)
++ && const_size < max_push_offset
++ && known_eq (crtl->outgoing_args_size, 0))
+ {
+ /* Simple, small frame with no outgoing arguments:
+ stp reg1, reg2, [sp, -frame_size]!
+--
+2.34.1
+
+
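Put differently, a leaf function that saves no registers now gets its whole frame from the initial adjustment instead of splitting the allocation around an empty register-save area. A rough sketch; the function is invented and the prologue shown is the expected shape, not captured compiler output.

/* Hypothetical leaf function: a large frame, nothing callee-saved.  */
int
sum_table (void)
{
  int table[1024];
  int total = 0;
  for (int i = 0; i < 1024; i++)
    total += (table[i] = i);
  return total;
}

/* Expected prologue/epilogue shape after the patch (illustrative):

       sub  sp, sp, #4096       // initial_adjust covers the whole frame
       ...                      // body
       add  sp, sp, #4096
       ret                                                              */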
+From 347487fffa0266d43bf18f1f91878410881f596e Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Fri, 16 Jun 2023 16:55:12 +0100
+Subject: [PATCH 02/10] aarch64: Add bytes_below_hard_fp to frame info
+
+The frame layout code currently hard-codes the assumption that
+the number of bytes below the saved registers is equal to the
+size of the outgoing arguments. This patch abstracts that
+value into a new field of aarch64_frame.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::bytes_below_hard_fp): New
+ field.
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Initialize it,
+ and use it instead of crtl->outgoing_args_size.
+ (aarch64_get_separate_components): Use bytes_below_hard_fp instead
+ of outgoing_args_size.
+ (aarch64_process_components): Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 50 +++++++++++++++++++-----------------
+ gcc/config/aarch64/aarch64.h | 6 ++++-
+ 2 files changed, 32 insertions(+), 24 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index e9dad682738..25cf10cc4b9 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4684,6 +4684,8 @@ aarch64_layout_frame (void)
+ last_fp_reg = regno;
+ }
+
++ cfun->machine->frame.bytes_below_hard_fp = crtl->outgoing_args_size;
++
+ if (cfun->machine->frame.emit_frame_chain)
+ {
+ /* FP and LR are placed in the linkage record. */
+@@ -4751,11 +4753,11 @@ aarch64_layout_frame (void)
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ /* Both these values are already aligned. */
+- gcc_assert (multiple_p (crtl->outgoing_args_size,
++ gcc_assert (multiple_p (cfun->machine->frame.bytes_below_hard_fp,
+ STACK_BOUNDARY / BITS_PER_UNIT));
+ cfun->machine->frame.frame_size
+ = (cfun->machine->frame.hard_fp_offset
+- + crtl->outgoing_args_size);
++ + cfun->machine->frame.bytes_below_hard_fp);
+
+ cfun->machine->frame.locals_offset = cfun->machine->frame.saved_varargs_size;
+
+@@ -4775,23 +4777,23 @@ aarch64_layout_frame (void)
+ cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
+ else if (cfun->machine->frame.frame_size.is_constant (&const_size)
+ && const_size < max_push_offset
+- && known_eq (crtl->outgoing_args_size, 0))
++ && known_eq (cfun->machine->frame.bytes_below_hard_fp, 0))
+ {
+- /* Simple, small frame with no outgoing arguments:
++ /* Simple, small frame with no data below the saved registers.
+ stp reg1, reg2, [sp, -frame_size]!
+ stp reg3, reg4, [sp, 16] */
+ cfun->machine->frame.callee_adjust = const_size;
+ }
+- else if (known_lt (crtl->outgoing_args_size
++ else if (known_lt (cfun->machine->frame.bytes_below_hard_fp
+ + cfun->machine->frame.saved_regs_size, 512)
+ && !(cfun->calls_alloca
+ && known_lt (cfun->machine->frame.hard_fp_offset,
+ max_push_offset)))
+ {
+- /* Frame with small outgoing arguments:
++ /* Frame with small area below the saved registers:
+ sub sp, sp, frame_size
+- stp reg1, reg2, [sp, outgoing_args_size]
+- stp reg3, reg4, [sp, outgoing_args_size + 16] */
++ stp reg1, reg2, [sp, bytes_below_hard_fp]
++ stp reg3, reg4, [sp, bytes_below_hard_fp + 16] */
+ cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
+ cfun->machine->frame.callee_offset
+ = cfun->machine->frame.frame_size - cfun->machine->frame.hard_fp_offset;
+@@ -4799,22 +4801,23 @@ aarch64_layout_frame (void)
+ else if (cfun->machine->frame.hard_fp_offset.is_constant (&const_fp_offset)
+ && const_fp_offset < max_push_offset)
+ {
+- /* Frame with large outgoing arguments but a small local area:
++ /* Frame with large area below the saved registers, but with a
++ small area above:
+ stp reg1, reg2, [sp, -hard_fp_offset]!
+ stp reg3, reg4, [sp, 16]
+- sub sp, sp, outgoing_args_size */
++ sub sp, sp, bytes_below_hard_fp */
+ cfun->machine->frame.callee_adjust = const_fp_offset;
+ cfun->machine->frame.final_adjust
+ = cfun->machine->frame.frame_size - cfun->machine->frame.callee_adjust;
+ }
+ else
+ {
+- /* Frame with large local area and outgoing arguments using frame pointer:
++ /* General case:
+ sub sp, sp, hard_fp_offset
+ stp x29, x30, [sp, 0]
+ add x29, sp, 0
+ stp reg3, reg4, [sp, 16]
+- sub sp, sp, outgoing_args_size */
++ sub sp, sp, bytes_below_hard_fp */
+ cfun->machine->frame.initial_adjust = cfun->machine->frame.hard_fp_offset;
+ cfun->machine->frame.final_adjust
+ = cfun->machine->frame.frame_size - cfun->machine->frame.initial_adjust;
+@@ -5243,9 +5246,11 @@ aarch64_get_separate_components (void)
+ if (aarch64_register_saved_on_entry (regno))
+ {
+ poly_int64 offset = cfun->machine->frame.reg_offset[regno];
++
++ /* Get the offset relative to the register we'll use. */
+ if (!frame_pointer_needed)
+- offset += cfun->machine->frame.frame_size
+- - cfun->machine->frame.hard_fp_offset;
++ offset += cfun->machine->frame.bytes_below_hard_fp;
++
+ /* Check that we can access the stack slot of the register with one
+ direct load with no adjustments needed. */
+ if (offset_12bit_unsigned_scaled_p (DImode, offset))
+@@ -5367,8 +5372,8 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ rtx reg = gen_rtx_REG (mode, regno);
+ poly_int64 offset = cfun->machine->frame.reg_offset[regno];
+ if (!frame_pointer_needed)
+- offset += cfun->machine->frame.frame_size
+- - cfun->machine->frame.hard_fp_offset;
++ offset += cfun->machine->frame.bytes_below_hard_fp;
++
+ rtx addr = plus_constant (Pmode, ptr_reg, offset);
+ rtx mem = gen_frame_mem (mode, addr);
+
+@@ -5410,8 +5415,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+ /* REGNO2 can be saved/restored in a pair with REGNO. */
+ rtx reg2 = gen_rtx_REG (mode, regno2);
+ if (!frame_pointer_needed)
+- offset2 += cfun->machine->frame.frame_size
+- - cfun->machine->frame.hard_fp_offset;
++ offset2 += cfun->machine->frame.bytes_below_hard_fp;
+ rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
+ rtx mem2 = gen_frame_mem (mode, addr2);
+ rtx set2 = prologue_p ? gen_rtx_SET (mem2, reg2)
+@@ -5478,10 +5482,10 @@ aarch64_stack_clash_protection_alloca_probe_range (void)
+ registers. If POLY_SIZE is not large enough to require a probe this function
+ will only adjust the stack. When allocating the stack space
+ FRAME_RELATED_P is then used to indicate if the allocation is frame related.
+- FINAL_ADJUSTMENT_P indicates whether we are allocating the outgoing
+- arguments. If we are then we ensure that any allocation larger than the ABI
+- defined buffer needs a probe so that the invariant of having a 1KB buffer is
+- maintained.
++ FINAL_ADJUSTMENT_P indicates whether we are allocating the area below
++ the saved registers. If we are then we ensure that any allocation
++ larger than the ABI defined buffer needs a probe so that the
++ invariant of having a 1KB buffer is maintained.
+
+ We emit barriers after each stack adjustment to prevent optimizations from
+ breaking the invariant that we never drop the stack more than a page. This
+@@ -5671,7 +5675,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ /* Handle any residuals. Residuals of at least MIN_PROBE_THRESHOLD have to
+ be probed. This maintains the requirement that each page is probed at
+ least once. For initial probing we probe only if the allocation is
+- more than GUARD_SIZE - buffer, and for the outgoing arguments we probe
++ more than GUARD_SIZE - buffer, and below the saved registers we probe
+ if the amount is larger than buffer. GUARD_SIZE - buffer + buffer ==
+ GUARD_SIZE. This works that for any allocation that is large enough to
+ trigger a probe here, we'll have at least one, and if they're not large
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index af0bc3f1881..95831637ba7 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -712,9 +712,13 @@ struct GTY (()) aarch64_frame
+ HOST_WIDE_INT saved_varargs_size;
+
+ /* The size of the saved callee-save int/FP registers. */
+-
+ HOST_WIDE_INT saved_regs_size;
+
++ /* The number of bytes between the bottom of the static frame (the bottom
++ of the outgoing arguments) and the hard frame pointer. This value is
++ always a multiple of STACK_BOUNDARY. */
++ poly_int64 bytes_below_hard_fp;
++
+ /* Offset from the base of the frame (incomming SP) to the
+ top of the locals area. This value is always a multiple of
+ STACK_BOUNDARY. */
+--
+2.34.1
+
+
+From 4604c4cd0a6c4c26d6594ec9a0383b4d9197d9df Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 27 Jun 2023 11:25:40 +0100
+Subject: [PATCH 03/10] aarch64: Rename locals_offset to bytes_above_locals
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+locals_offset was described as:
+
+ /* Offset from the base of the frame (incomming SP) to the
+ top of the locals area. This value is always a multiple of
+ STACK_BOUNDARY. */
+
+This is implicitly an “upside down” view of the frame: the incoming
+SP is at offset 0, and anything N bytes below the incoming SP is at
+offset N (rather than -N).
+
+However, reg_offset instead uses a “right way up” view; that is,
+it views offsets in address terms. Something above X is at a
+positive offset from X and something below X is at a negative
+offset from X.
+
+Also, even on FRAME_GROWS_DOWNWARD targets like AArch64,
+target-independent code views offsets in address terms too:
+locals are allocated at negative offsets to virtual_stack_vars.
+
+It seems confusing to have *_offset fields of the same structure
+using different polarities like this. This patch tries to avoid
+that by renaming locals_offset to bytes_above_locals.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::locals_offset): Rename to...
+ (aarch64_frame::bytes_above_locals): ...this.
+ * config/aarch64/aarch64.c (aarch64_layout_frame)
+ (aarch64_initial_elimination_offset): Update accordingly.
+---
+ gcc/config/aarch64/aarch64.c | 9 +++++----
+ gcc/config/aarch64/aarch64.h | 6 +++---
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 25cf10cc4b9..dcaf491af42 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4759,7 +4759,8 @@ aarch64_layout_frame (void)
+ = (cfun->machine->frame.hard_fp_offset
+ + cfun->machine->frame.bytes_below_hard_fp);
+
+- cfun->machine->frame.locals_offset = cfun->machine->frame.saved_varargs_size;
++ cfun->machine->frame.bytes_above_locals
++ = cfun->machine->frame.saved_varargs_size;
+
+ cfun->machine->frame.initial_adjust = 0;
+ cfun->machine->frame.final_adjust = 0;
+@@ -8566,14 +8567,14 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+
+ if (from == FRAME_POINTER_REGNUM)
+ return cfun->machine->frame.hard_fp_offset
+- - cfun->machine->frame.locals_offset;
++ - cfun->machine->frame.bytes_above_locals;
+ }
+
+ if (to == STACK_POINTER_REGNUM)
+ {
+ if (from == FRAME_POINTER_REGNUM)
+- return cfun->machine->frame.frame_size
+- - cfun->machine->frame.locals_offset;
++ return cfun->machine->frame.frame_size
++ - cfun->machine->frame.bytes_above_locals;
+ }
+
+ return cfun->machine->frame.frame_size;
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 95831637ba7..a079a88b4f4 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -719,10 +719,10 @@ struct GTY (()) aarch64_frame
+ always a multiple of STACK_BOUNDARY. */
+ poly_int64 bytes_below_hard_fp;
+
+- /* Offset from the base of the frame (incomming SP) to the
+- top of the locals area. This value is always a multiple of
++ /* The number of bytes between the top of the locals area and the top
++ of the frame (the incomming SP). This value is always a multiple of
+ STACK_BOUNDARY. */
+- poly_int64 locals_offset;
++ poly_int64 bytes_above_locals;
+
+ /* Offset from the base of the frame (incomming SP) to the
+ hard_frame_pointer. This value is always a multiple of
+--
+2.34.1
+
+
+From 16016465ff28a75f5e0540cbaeb4eb102fdc3230 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 27 Jun 2023 11:28:11 +0100
+Subject: [PATCH 04/10] aarch64: Rename hard_fp_offset to bytes_above_hard_fp
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Similarly to the previous locals_offset patch, hard_fp_offset
+was described as:
+
+ /* Offset from the base of the frame (incomming SP) to the
+ hard_frame_pointer. This value is always a multiple of
+ STACK_BOUNDARY. */
+ poly_int64 hard_fp_offset;
+
+which again took an “upside-down” view: higher offsets meant lower
+addresses. This patch renames the field to bytes_above_hard_fp instead.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::hard_fp_offset): Rename
+ to...
+ (aarch64_frame::bytes_above_hard_fp): ...this.
+ * config/aarch64/aarch64.c (aarch64_layout_frame)
+ (aarch64_expand_prologue): Update accordingly.
+ (aarch64_initial_elimination_offset): Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 21 +++++++++++----------
+ gcc/config/aarch64/aarch64.h | 6 +++---
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index dcaf491af42..2681e0c2bb9 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4747,7 +4747,7 @@ aarch64_layout_frame (void)
+ HOST_WIDE_INT varargs_and_saved_regs_size
+ = offset + cfun->machine->frame.saved_varargs_size;
+
+- cfun->machine->frame.hard_fp_offset
++ cfun->machine->frame.bytes_above_hard_fp
+ = aligned_upper_bound (varargs_and_saved_regs_size
+ + get_frame_size (),
+ STACK_BOUNDARY / BITS_PER_UNIT);
+@@ -4756,7 +4756,7 @@ aarch64_layout_frame (void)
+ gcc_assert (multiple_p (cfun->machine->frame.bytes_below_hard_fp,
+ STACK_BOUNDARY / BITS_PER_UNIT));
+ cfun->machine->frame.frame_size
+- = (cfun->machine->frame.hard_fp_offset
++ = (cfun->machine->frame.bytes_above_hard_fp
+ + cfun->machine->frame.bytes_below_hard_fp);
+
+ cfun->machine->frame.bytes_above_locals
+@@ -4788,7 +4788,7 @@ aarch64_layout_frame (void)
+ else if (known_lt (cfun->machine->frame.bytes_below_hard_fp
+ + cfun->machine->frame.saved_regs_size, 512)
+ && !(cfun->calls_alloca
+- && known_lt (cfun->machine->frame.hard_fp_offset,
++ && known_lt (cfun->machine->frame.bytes_above_hard_fp,
+ max_push_offset)))
+ {
+ /* Frame with small area below the saved registers:
+@@ -4797,14 +4797,14 @@ aarch64_layout_frame (void)
+ stp reg3, reg4, [sp, bytes_below_hard_fp + 16] */
+ cfun->machine->frame.initial_adjust = cfun->machine->frame.frame_size;
+ cfun->machine->frame.callee_offset
+- = cfun->machine->frame.frame_size - cfun->machine->frame.hard_fp_offset;
++ = cfun->machine->frame.frame_size - cfun->machine->frame.bytes_above_hard_fp;
+ }
+- else if (cfun->machine->frame.hard_fp_offset.is_constant (&const_fp_offset)
++ else if (cfun->machine->frame.bytes_above_hard_fp.is_constant (&const_fp_offset)
+ && const_fp_offset < max_push_offset)
+ {
+ /* Frame with large area below the saved registers, but with a
+ small area above:
+- stp reg1, reg2, [sp, -hard_fp_offset]!
++ stp reg1, reg2, [sp, -bytes_above_hard_fp]!
+ stp reg3, reg4, [sp, 16]
+ sub sp, sp, bytes_below_hard_fp */
+ cfun->machine->frame.callee_adjust = const_fp_offset;
+@@ -4814,12 +4814,13 @@ aarch64_layout_frame (void)
+ else
+ {
+ /* General case:
+- sub sp, sp, hard_fp_offset
++ sub sp, sp, bytes_above_hard_fp
+ stp x29, x30, [sp, 0]
+ add x29, sp, 0
+ stp reg3, reg4, [sp, 16]
+ sub sp, sp, bytes_below_hard_fp */
+- cfun->machine->frame.initial_adjust = cfun->machine->frame.hard_fp_offset;
++ cfun->machine->frame.initial_adjust
++ = cfun->machine->frame.bytes_above_hard_fp;
+ cfun->machine->frame.final_adjust
+ = cfun->machine->frame.frame_size - cfun->machine->frame.initial_adjust;
+ }
+@@ -8563,10 +8564,10 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+ if (to == HARD_FRAME_POINTER_REGNUM)
+ {
+ if (from == ARG_POINTER_REGNUM)
+- return cfun->machine->frame.hard_fp_offset;
++ return cfun->machine->frame.bytes_above_hard_fp;
+
+ if (from == FRAME_POINTER_REGNUM)
+- return cfun->machine->frame.hard_fp_offset
++ return cfun->machine->frame.bytes_above_hard_fp
+ - cfun->machine->frame.bytes_above_locals;
+ }
+
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index a079a88b4f4..eab6da84a02 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -724,10 +724,10 @@ struct GTY (()) aarch64_frame
+ STACK_BOUNDARY. */
+ poly_int64 bytes_above_locals;
+
+- /* Offset from the base of the frame (incomming SP) to the
+- hard_frame_pointer. This value is always a multiple of
++ /* The number of bytes between the hard_frame_pointer and the top of
++ the frame (the incomming SP). This value is always a multiple of
+ STACK_BOUNDARY. */
+- poly_int64 hard_fp_offset;
++ poly_int64 bytes_above_hard_fp;
+
+ /* The size of the frame. This value is the offset from base of the
+ frame (incomming SP) to the stack_pointer. This value is always
+--
+2.34.1
+
+
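Taken together, patches 02 to 04 replace the mixed-polarity offsets with byte counts expressed in address terms. A rough map of where the renamed fields sit in the GCC 9 frame, higher addresses at the top; the ordering of the sub-areas is inferred from the hunks above and from the pre-existing layout comments, so treat it as a sketch rather than authoritative documentation.

/* Sketch of the renamed aarch64_frame fields (illustrative):

   incoming SP ->    +------------------------------+
                     | varargs save area            |   } bytes_above_locals
   top of locals ->  +------------------------------+
                     | locals (get_frame_size ())   |
                     +------------------------------+
                     | other callee-saved registers |
   hard FP ->        | FP/LR pair                   |   (bytes_above_hard_fp =
                     +------------------------------+    distance from here up
                     | outgoing arguments           |    to the incoming SP)
   final SP ->       +------------------------------+   } bytes_below_hard_fp

   frame_size = bytes_above_hard_fp + bytes_below_hard_fp; alloca/VLA
   space, when used, is created below the final SP at run time.  */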
+From eb2271eb6bb68ec3c9aa9ae4746ea1ee5f18874a Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Thu, 22 Jun 2023 22:26:30 +0100
+Subject: [PATCH 05/10] aarch64: Tweak frame_size comment
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch fixes another case in which a value was described with
+an “upside-down” view.
+
+gcc/
+ * config/aarch64/aarch64.h (aarch64_frame::frame_size): Tweak comment.
+---
+ gcc/config/aarch64/aarch64.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index eab6da84a02..7c4b65ec55b 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -729,8 +729,8 @@ struct GTY (()) aarch64_frame
+ STACK_BOUNDARY. */
+ poly_int64 bytes_above_hard_fp;
+
+- /* The size of the frame. This value is the offset from base of the
+- frame (incomming SP) to the stack_pointer. This value is always
++ /* The size of the frame, i.e. the number of bytes between the bottom
++ of the outgoing arguments and the incoming SP. This value is always
+ a multiple of STACK_BOUNDARY. */
+ poly_int64 frame_size;
+
+--
+2.34.1
+
+
+From cfed3b87e9351edff1568ade4ef666edc9887639 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 15 Aug 2023 19:05:30 +0100
+Subject: [PATCH 06/10] Backport check-function-bodies support
+
+---
+ gcc/testsuite/lib/scanasm.exp | 191 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 191 insertions(+)
+
+diff --git a/gcc/testsuite/lib/scanasm.exp b/gcc/testsuite/lib/scanasm.exp
+index 35ccbc86fc0..c9af27bf47a 100644
+--- a/gcc/testsuite/lib/scanasm.exp
++++ b/gcc/testsuite/lib/scanasm.exp
+@@ -546,3 +546,194 @@ proc scan-lto-assembler { args } {
+ verbose "output_file: $output_file"
+ dg-scan "scan-lto-assembler" 1 $testcase $output_file $args
+ }
++
++# Read assembly file FILENAME and store a mapping from function names
++# to function bodies in array RESULT. FILENAME has already been uploaded
++# locally where necessary and is known to exist.
++
++proc parse_function_bodies { filename result } {
++ upvar $result up_result
++
++ # Regexp for the start of a function definition (name in \1).
++ set label {^([a-zA-Z_]\S+):$}
++
++ # Regexp for the end of a function definition.
++ set terminator {^\s*\.size}
++
++ # Regexp for lines that aren't interesting.
++ set fluff {^\s*(?:\.|//|@|$)}
++
++ set fd [open $filename r]
++ set in_function 0
++ while { [gets $fd line] >= 0 } {
++ if { [regexp $label $line dummy function_name] } {
++ set in_function 1
++ set function_body ""
++ } elseif { $in_function } {
++ if { [regexp $terminator $line] } {
++ set up_result($function_name) $function_body
++ set in_function 0
++ } elseif { ![regexp $fluff $line] } {
++ append function_body $line "\n"
++ }
++ }
++ }
++ close $fd
++}
++
++# FUNCTIONS is an array that maps function names to function bodies.
++# Return true if it contains a definition of function NAME and if
++# that definition matches BODY_REGEXP.
++
++proc check_function_body { functions name body_regexp } {
++ upvar $functions up_functions
++
++ if { ![info exists up_functions($name)] } {
++ return 0
++ }
++ set fn_res [regexp "^$body_regexp\$" $up_functions($name)]
++ if { !$fn_res } {
++ verbose -log "body: $body_regexp"
++ verbose -log "against: $up_functions($name)"
++ }
++ return $fn_res
++}
++
++# Check the implementations of functions against expected output. Used as:
++#
++# { dg-do { check-function-bodies PREFIX TERMINATOR[ OPTION[ SELECTOR]] } }
++#
++# See sourcebuild.texi for details.
++
++proc check-function-bodies { args } {
++ if { [llength $args] < 2 } {
++ error "too few arguments to check-function-bodies"
++ }
++ if { [llength $args] > 4 } {
++ error "too many arguments to check-function-bodies"
++ }
++
++ if { [llength $args] >= 3 } {
++ set required_flags [lindex $args 2]
++
++ upvar 2 dg-extra-tool-flags extra_tool_flags
++ set flags $extra_tool_flags
++
++ global torture_current_flags
++ if { [info exists torture_current_flags] } {
++ append flags " " $torture_current_flags
++ }
++ foreach required_flag $required_flags {
++ switch -- $required_flag {
++ target -
++ xfail {
++ error "misplaced $required_flag in check-function-bodies"
++ }
++ }
++ }
++ foreach required_flag $required_flags {
++ if { ![regexp " $required_flag " $flags] } {
++ return
++ }
++ }
++ }
++
++ set xfail_all 0
++ if { [llength $args] >= 4 } {
++ switch [dg-process-target [lindex $args 3]] {
++ "S" { }
++ "N" { return }
++ "F" { set xfail_all 1 }
++ "P" { }
++ }
++ }
++
++ set testcase [testname-for-summary]
++ # The name might include a list of options; extract the file name.
++ set filename [lindex $testcase 0]
++
++ global srcdir
++ set input_filename "$srcdir/$filename"
++ set output_filename "[file rootname [file tail $filename]].s"
++
++ set prefix [lindex $args 0]
++ set prefix_len [string length $prefix]
++ set terminator [lindex $args 1]
++ if { [string equal $terminator ""] } {
++ set terminator "*/"
++ }
++ set terminator_len [string length $terminator]
++
++ set have_bodies 0
++ if { [is_remote host] } {
++ remote_upload host "$filename"
++ }
++ if { [file exists $output_filename] } {
++ parse_function_bodies $output_filename functions
++ set have_bodies 1
++ } else {
++ verbose -log "$testcase: output file does not exist"
++ }
++
++ set count 0
++ set function_regexp ""
++ set label {^(\S+):$}
++
++ set lineno 1
++ set fd [open $input_filename r]
++ set in_function 0
++ while { [gets $fd line] >= 0 } {
++ if { [string equal -length $prefix_len $line $prefix] } {
++ set line [string trim [string range $line $prefix_len end]]
++ if { !$in_function } {
++ if { [regexp "^(.*?\\S)\\s+{(.*)}\$" $line dummy \
++ line selector] } {
++ set selector [dg-process-target $selector]
++ } else {
++ set selector "P"
++ }
++ if { ![regexp $label $line dummy function_name] } {
++ close $fd
++ error "check-function-bodies: line $lineno does not have a function label"
++ }
++ set in_function 1
++ set function_regexp ""
++ } elseif { [string equal $line "("] } {
++ append function_regexp "(?:"
++ } elseif { [string equal $line "|"] } {
++ append function_regexp "|"
++ } elseif { [string equal $line ")"] } {
++ append function_regexp ")"
++ } elseif { [string equal $line "..."] } {
++ append function_regexp ".*"
++ } else {
++ append function_regexp "\t" $line "\n"
++ }
++ } elseif { [string equal -length $terminator_len $line $terminator] } {
++ if { ![string equal $selector "N"] } {
++ if { $xfail_all || [string equal $selector "F"] } {
++ setup_xfail "*-*-*"
++ }
++ set testname "$testcase check-function-bodies $function_name"
++ if { !$have_bodies } {
++ unresolved $testname
++ } elseif { [check_function_body functions $function_name \
++ $function_regexp] } {
++ pass $testname
++ } else {
++ fail $testname
++ }
++ }
++ set in_function 0
++ incr count
++ }
++ incr lineno
++ }
++ close $fd
++ if { $in_function } {
++ error "check-function-bodies: missing \"$terminator\""
++ }
++ if { $count == 0 } {
++ error "check-function-bodies: no matches found"
++ }
++}
+--
+2.34.1
+
+
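For context, the sketch below shows how a DejaGnu test consumes the check-function-bodies directive introduced above. It is illustrative only and not part of the backport; the function name and the bare "ret" expected at the tail are assumptions, and the exact assembly depends on target and options.

    /* { dg-do compile } */
    /* { dg-options "-O2" } */
    /* { dg-final { check-function-bodies "**" "" } } */

    /*
    ** add_one:
    ** ...
    ** ret
    */
    int add_one (int x)
    {
      return x + 1;
    }

With prefix "**" and the default "*/" terminator, the lines between the label and the terminator become a regexp ("..." turns into ".*") that is matched against the body of add_one extracted from the generated .s file.
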
+From 4dd8925d95d3d6d89779b494b5f4cfadcf9fa96e Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 27 Jun 2023 15:11:44 +0100
+Subject: [PATCH 07/10] aarch64: Tweak stack clash boundary condition
+
+The AArch64 ABI says that, when stack clash protection is used,
+there can be a maximum of 1KiB of unprobed space at sp on entry
+to a function. Therefore, we need to probe when allocating
+>= guard_size - 1KiB of data (>= rather than >). This is what
+GCC does.
+
+If an allocation is exactly guard_size bytes, it is enough to allocate
+those bytes and probe once at offset 1024. It isn't possible to use a
+single probe at any other offset: higher would complicate later code,
+by leaving more unprobed space than usual, while lower would risk
+leaving an entire page unprobed. For simplicity, the code probes all
+allocations at offset 1024.
+
+Some register saves also act as probes. If we need to allocate
+more space below the last such register save probe, we need to
+probe the allocation if it is > 1KiB. Again, this allocation is
+then sometimes (but not always) probed at offset 1024. This sort of
+allocation is currently only used for outgoing arguments, which are
+rarely this big.
+
+However, the code also probed if this final outgoing-arguments
+allocation was == 1KiB, rather than just > 1KiB. This isn't
+necessary, since the register save then probes at offset 1024
+as required. Continuing to probe allocations of exactly 1KiB
+would complicate later patches.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_allocate_and_probe_stack_space):
+ Don't probe final allocations that are exactly 1KiB in size (after
+ unprobed space above the final allocation has been deducted).
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-17.c: New test.
+---
+ gcc/config/aarch64/aarch64.c | 6 +-
+ .../aarch64/stack-check-prologue-17.c | 55 +++++++++++++++++++
+ 2 files changed, 60 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 2681e0c2bb9..4c9e11cd7cf 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -5506,6 +5506,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT guard_size
+ = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
++ HOST_WIDE_INT byte_sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
++ gcc_assert (multiple_p (poly_size, byte_sp_alignment));
+ /* When doing the final adjustment for the outgoing argument size we can't
+ assume that LR was saved at position 0. So subtract it's offset from the
+ ABI safe buffer so that we don't accidentally allow an adjustment that
+@@ -5513,7 +5515,9 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ probing. */
+ HOST_WIDE_INT min_probe_threshold
+ = final_adjustment_p
+- ? guard_used_by_caller - cfun->machine->frame.reg_offset[LR_REGNUM]
++ ? (guard_used_by_caller
++ + byte_sp_alignment
++ - cfun->machine->frame.reg_offset[LR_REGNUM])
+ : guard_size - guard_used_by_caller;
+
+ poly_int64 frame_size = cfun->machine->frame.frame_size;
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+new file mode 100644
+index 00000000000..0d8a25d73a2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -0,0 +1,55 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
+--
+2.34.1
+
+
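To make the boundary condition concrete, here is the arithmetic implied by the new test above; this is a reader's sketch using the test's --param stack-clash-protection-guard-size=12, the 1KiB STACK_CLASH_CALLER_GUARD, a 16-byte stack alignment, and LR saved at offset 0 as in the expected output:

    guard_size           = 2^12 = 4096 bytes
    guard_used_by_caller = 1024 bytes
    byte_sp_alignment    = 16 bytes

    ordinary allocation:  probe when size >= 4096 - 1024 = 3072
    final allocation:     probe when size >= 1024 + 16 - 0 = 1040

    test1: 1024-byte final allocation -> below 1040, no probe emitted
    test2: 1040-byte final allocation -> reaches 1040, probed (str xzr, [sp])
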
+From 12517baf6c88447e3bda3a459ac4c29d61f84e6c Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 27 Jun 2023 15:12:55 +0100
+Subject: [PATCH 08/10] aarch64: Put LR save probe in first 16 bytes
+
+-fstack-clash-protection uses the save of LR as a probe for the next
+allocation. The next allocation could be:
+
+* another part of the static frame, e.g. when allocating SVE save slots
+ or outgoing arguments
+
+* an alloca in the same function
+
+* an allocation made by a callee function
+
+However, when -fomit-frame-pointer is used, the LR save slot is placed
+above the other GPR save slots. It could therefore be up to 80 bytes
+above the base of the GPR save area (which is also the hard fp address).
+
+aarch64_allocate_and_probe_stack_space took this into account when
+deciding how much subsequent space could be allocated without needing
+a probe. However, it interacted badly with:
+
+ /* If doing a small final adjustment, we always probe at offset 0.
+ This is done to avoid issues when LR is not at position 0 or when
+ the final adjustment is smaller than the probing offset. */
+ else if (final_adjustment_p && rounded_size == 0)
+ residual_probe_offset = 0;
+
+which forces any allocation that is smaller than the guard page size
+to be probed at offset 0 rather than the usual offset 1024. It was
+therefore possible to construct cases in which we had:
+
+* a probe using LR at SP + 80 bytes (or some other value >= 16)
+* an allocation of the guard page size - 16 bytes
+* a probe at SP + 0
+
+which allocates guard page size + 64 consecutive unprobed bytes.
+
+This patch requires the LR probe to be in the first 16 bytes of the
+save area when stack clash protection is active. Doing it
+unconditionally would cause code-quality regressions.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_layout_frame): Ensure that
+ the LR save slot is in the first 16 bytes of the register save area.
+ (aarch64_allocate_and_probe_stack_space): Remove workaround for
+ when LR was not in the first 16 bytes.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-18.c: New test.
+---
+ gcc/config/aarch64/aarch64.c | 50 +++++----
+ .../aarch64/stack-check-prologue-18.c | 100 ++++++++++++++++++
+ 2 files changed, 127 insertions(+), 23 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 4c9e11cd7cf..1e8467fdd03 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4686,15 +4686,31 @@ aarch64_layout_frame (void)
+
+ cfun->machine->frame.bytes_below_hard_fp = crtl->outgoing_args_size;
+
++#define ALLOCATE_GPR_SLOT(REGNO) \
++ do \
++ { \
++ cfun->machine->frame.reg_offset[REGNO] = offset; \
++ if (cfun->machine->frame.wb_candidate1 == INVALID_REGNUM) \
++ cfun->machine->frame.wb_candidate1 = (REGNO); \
++ else if (cfun->machine->frame.wb_candidate2 == INVALID_REGNUM) \
++ cfun->machine->frame.wb_candidate2 = (REGNO); \
++ offset += UNITS_PER_WORD; \
++ } \
++ while (0)
++
+ if (cfun->machine->frame.emit_frame_chain)
+ {
+ /* FP and LR are placed in the linkage record. */
+- cfun->machine->frame.reg_offset[R29_REGNUM] = 0;
+- cfun->machine->frame.wb_candidate1 = R29_REGNUM;
+- cfun->machine->frame.reg_offset[R30_REGNUM] = UNITS_PER_WORD;
+- cfun->machine->frame.wb_candidate2 = R30_REGNUM;
+- offset = 2 * UNITS_PER_WORD;
++ ALLOCATE_GPR_SLOT (R29_REGNUM);
++ ALLOCATE_GPR_SLOT (R30_REGNUM);
+ }
++ else if (flag_stack_clash_protection
++ && cfun->machine->frame.reg_offset[R30_REGNUM] == SLOT_REQUIRED)
++ /* Put the LR save slot first, since it makes a good choice of probe
++ for stack clash purposes. The idea is that the link register usually
++ has to be saved before a call anyway, and so we lose little by
++ stopping it from being individually shrink-wrapped. */
++ ALLOCATE_GPR_SLOT (R30_REGNUM);
+
+ /* With stack-clash, LR must be saved in non-leaf functions. */
+ gcc_assert (crtl->is_leaf
+@@ -4704,14 +4720,9 @@ aarch64_layout_frame (void)
+ /* Now assign stack slots for them. */
+ for (regno = R0_REGNUM; regno <= R30_REGNUM; regno++)
+ if (cfun->machine->frame.reg_offset[regno] == SLOT_REQUIRED)
+- {
+- cfun->machine->frame.reg_offset[regno] = offset;
+- if (cfun->machine->frame.wb_candidate1 == INVALID_REGNUM)
+- cfun->machine->frame.wb_candidate1 = regno;
+- else if (cfun->machine->frame.wb_candidate2 == INVALID_REGNUM)
+- cfun->machine->frame.wb_candidate2 = regno;
+- offset += UNITS_PER_WORD;
+- }
++ ALLOCATE_GPR_SLOT (regno);
++
++#undef ALLOCATE_GPR_SLOT
+
+ HOST_WIDE_INT max_int_offset = offset;
+ offset = ROUND_UP (offset, STACK_BOUNDARY / BITS_PER_UNIT);
+@@ -5508,16 +5519,9 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
+ HOST_WIDE_INT byte_sp_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+ gcc_assert (multiple_p (poly_size, byte_sp_alignment));
+- /* When doing the final adjustment for the outgoing argument size we can't
+- assume that LR was saved at position 0. So subtract it's offset from the
+- ABI safe buffer so that we don't accidentally allow an adjustment that
+- would result in an allocation larger than the ABI buffer without
+- probing. */
+ HOST_WIDE_INT min_probe_threshold
+ = final_adjustment_p
+- ? (guard_used_by_caller
+- + byte_sp_alignment
+- - cfun->machine->frame.reg_offset[LR_REGNUM])
++ ? guard_used_by_caller + byte_sp_alignment
+ : guard_size - guard_used_by_caller;
+
+ poly_int64 frame_size = cfun->machine->frame.frame_size;
+@@ -5697,8 +5701,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+ /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when LR is not at position 0 or when
+- the final adjustment is smaller than the probing offset. */
++ This is done to avoid issues when the final adjustment is smaller
++ than the probing offset. */
+ else if (final_adjustment_p && rounded_size == 0)
+ residual_probe_offset = 0;
+
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+new file mode 100644
+index 00000000000..82447d20fff
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -0,0 +1,100 @@
++/* { dg-options "-O2 -fstack-clash-protection -fomit-frame-pointer --param stack-clash-protection-guard-size=12" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void f(int, ...);
++void g();
++
++/*
++** test1:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #4064
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++** str x26, \[sp, #?4128\]
++** ...
++*/
++int test1(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test2:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1040
++** str xzr, \[sp\]
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test2(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x);
++ }
++ g();
++ return 1;
++}
++
++/*
++** test3:
++** ...
++** str x30, \[sp\]
++** sub sp, sp, #1024
++** cbnz w0, .*
++** bl g
++** ...
++*/
++int test3(int z) {
++ __uint128_t x = 0;
++ int y[0x400];
++ if (z)
++ {
++ asm volatile ("" :::
++ "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26");
++ f(0, 0, 0, 0, 0, 0, 0, &y,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
++ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
++ }
++ g();
++ return 1;
++}
+--
+2.34.1
+
+
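Spelling out the arithmetic behind the "guard page size + 64" figure above (assuming the worst case mentioned, with the LR probe 80 bytes above the bottom of the save area):

    probe 1: old SP + 80                          (the LR save)
    allocation of guard_size - 16 bytes, previously left unprobed
    probe 2: new SP + 0 = old SP - (guard_size - 16)

    gap between probes = 80 + (guard_size - 16) = guard_size + 64 bytes
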
+From f2684e63652bb251d22c79e40081c646df1f36b6 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 8 Aug 2023 01:57:26 +0100
+Subject: [PATCH 09/10] aarch64: Simplify probe of final frame allocation
+
+Previous patches ensured that the final frame allocation only needs
+a probe when the size is strictly greater than 1KiB. It's therefore
+safe to use the normal 1024 probe offset in all cases.
+
+The main motivation for doing this is to simplify the code and
+reduce the number of special cases.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_allocate_and_probe_stack_space):
+ Always probe the residual allocation at offset 1024, asserting
+ that that is in range.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-17.c: Expect the probe
+ to be at offset 1024 rather than offset 0.
+ * gcc.target/aarch64/stack-check-prologue-18.c: Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 12 ++++--------
+ .../gcc.target/aarch64/stack-check-prologue-17.c | 2 +-
+ .../gcc.target/aarch64/stack-check-prologue-18.c | 7 +++++--
+ 3 files changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 1e8467fdd03..705f719a2ea 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -5695,16 +5695,12 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ are still safe. */
+ if (residual)
+ {
+- HOST_WIDE_INT residual_probe_offset = guard_used_by_caller;
++ gcc_assert (guard_used_by_caller + byte_sp_alignment <= size);
++
+ /* If we're doing final adjustments, and we've done any full page
+ allocations then any residual needs to be probed. */
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+- /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when the final adjustment is smaller
+- than the probing offset. */
+- else if (final_adjustment_p && rounded_size == 0)
+- residual_probe_offset = 0;
+
+ aarch64_sub_sp (temp1, temp2, residual, frame_related_p);
+ if (residual >= min_probe_threshold)
+@@ -5715,8 +5711,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT_PRINT_DEC " bytes, probing will be required."
+ "\n", residual);
+
+- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+- residual_probe_offset));
++ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
++ guard_used_by_caller));
+ emit_insn (gen_blockage ());
+ }
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+index 0d8a25d73a2..f0ec1389771 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -33,7 +33,7 @@ int test1(int z) {
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+index 82447d20fff..71d33ba34e9 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -8,8 +8,9 @@ void g();
+ ** test1:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #4064
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+@@ -49,8 +50,9 @@ int test1(int z) {
+ ** test2:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+@@ -77,6 +79,7 @@ int test2(int z) {
+ ** test3:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #1024
+ ** cbnz w0, .*
+ ** bl g
+--
+2.34.1
+
+
+From bf3eeaa0182a92987570d9c787bd45079eebf528 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Thu, 15 Jun 2023 19:16:52 +0100
+Subject: [PATCH 10/10] aarch64: Make stack smash canary protect saved
+ registers
+
+AArch64 normally puts the saved registers near the bottom of the frame,
+immediately above any dynamic allocations. But this means that a
+stack-smash attack on those dynamic allocations could overwrite the
+saved registers without needing to reach as far as the stack smash
+canary.
+
+The same thing could also happen for variable-sized arguments that are
+passed by value, since those are allocated before a call and popped on
+return.
+
+This patch avoids that by putting the locals (and thus the canary) below
+the saved registers when stack smash protection is active.
+
+The patch fixes CVE-2023-4039.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_save_regs_above_locals_p):
+ New function.
+ (aarch64_layout_frame): Use it to decide whether locals should
+ go above or below the saved registers.
+ (aarch64_expand_prologue): Update stack layout comment.
+ Emit a stack tie after the final adjustment.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-protector-8.c: New test.
+ * gcc.target/aarch64/stack-protector-9.c: Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 46 +++++++++++++--
+ .../gcc.target/aarch64/stack-protector-8.c | 58 +++++++++++++++++++
+ .../gcc.target/aarch64/stack-protector-9.c | 33 +++++++++++
+ 3 files changed, 133 insertions(+), 4 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 705f719a2ea..3d094214fac 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -4622,6 +4622,20 @@ aarch64_needs_frame_chain (void)
+ return aarch64_use_frame_pointer;
+ }
+
++/* Return true if the current function should save registers above
++ the locals area, rather than below it. */
++
++static bool
++aarch64_save_regs_above_locals_p ()
++{
++ /* When using stack smash protection, make sure that the canary slot
++ comes between the locals and the saved registers. Otherwise,
++ it would be possible for a carefully sized smash attack to change
++ the saved registers (particularly LR and FP) without reaching the
++ canary. */
++ return crtl->stack_protect_guard;
++}
++
+ /* Mark the registers that need to be saved by the callee and calculate
+ the size of the callee-saved registers area and frame record (both FP
+ and LR may be omitted). */
+@@ -4686,6 +4700,16 @@ aarch64_layout_frame (void)
+
+ cfun->machine->frame.bytes_below_hard_fp = crtl->outgoing_args_size;
+
++ bool regs_at_top_p = aarch64_save_regs_above_locals_p ();
++
++ if (regs_at_top_p)
++ {
++ cfun->machine->frame.bytes_below_hard_fp += get_frame_size ();
++ cfun->machine->frame.bytes_below_hard_fp
++ = aligned_upper_bound (cfun->machine->frame.bytes_below_hard_fp,
++ STACK_BOUNDARY / BITS_PER_UNIT);
++ }
++
+ #define ALLOCATE_GPR_SLOT(REGNO) \
+ do \
+ { \
+@@ -4758,9 +4782,11 @@ aarch64_layout_frame (void)
+ HOST_WIDE_INT varargs_and_saved_regs_size
+ = offset + cfun->machine->frame.saved_varargs_size;
+
++ cfun->machine->frame.bytes_above_hard_fp = varargs_and_saved_regs_size;
++ if (!regs_at_top_p)
++ cfun->machine->frame.bytes_above_hard_fp += get_frame_size ();
+ cfun->machine->frame.bytes_above_hard_fp
+- = aligned_upper_bound (varargs_and_saved_regs_size
+- + get_frame_size (),
++ = aligned_upper_bound (cfun->machine->frame.bytes_above_hard_fp,
+ STACK_BOUNDARY / BITS_PER_UNIT);
+
+ /* Both these values are already aligned. */
+@@ -4772,6 +4798,9 @@ aarch64_layout_frame (void)
+
+ cfun->machine->frame.bytes_above_locals
+ = cfun->machine->frame.saved_varargs_size;
++ if (regs_at_top_p)
++ cfun->machine->frame.bytes_above_locals
++ += cfun->machine->frame.saved_regs_size;
+
+ cfun->machine->frame.initial_adjust = 0;
+ cfun->machine->frame.final_adjust = 0;
+@@ -5764,10 +5793,10 @@ aarch64_add_cfa_expression (rtx_insn *insn, unsigned int reg,
+ | for register varargs |
+ | |
+ +-------------------------------+
+- | local variables | <-- frame_pointer_rtx
++ | local variables (1) | <-- frame_pointer_rtx
+ | |
+ +-------------------------------+
+- | padding | \
++ | padding (1) | \
+ +-------------------------------+ |
+ | callee-saved registers | | frame.saved_regs_size
+ +-------------------------------+ |
+@@ -5775,6 +5804,10 @@ aarch64_add_cfa_expression (rtx_insn *insn, unsigned int reg,
+ +-------------------------------+ |
+ | FP' | / <- hard_frame_pointer_rtx (aligned)
+ +-------------------------------+
++ | local variables (2) |
++ +-------------------------------+
++ | padding (2) |
++ +-------------------------------+
+ | dynamic allocation |
+ +-------------------------------+
+ | padding |
+@@ -5784,6 +5817,9 @@ aarch64_add_cfa_expression (rtx_insn *insn, unsigned int reg,
+ +-------------------------------+
+ | | <-- stack_pointer_rtx (aligned)
+
++ The regions marked (1) and (2) are mutually exclusive. (2) is used
++ when aarch64_save_regs_above_locals_p is true.
++
+ Dynamic stack allocations via alloca() decrease stack_pointer_rtx
+ but leave frame_pointer_rtx and hard_frame_pointer_rtx
+ unchanged.
+@@ -5937,6 +5973,8 @@ aarch64_expand_prologue (void)
+ that is assumed by the called. */
+ aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
+ !frame_pointer_needed, true);
++ if (emit_frame_chain && maybe_ne (final_adjust, 0))
++ emit_insn (gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx));
+ }
+
+ /* Return TRUE if we can use a simple_return insn.
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+new file mode 100644
+index 00000000000..c5e7deef6c1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-8.c
+@@ -0,0 +1,58 @@
++/* { dg-options " -O -fstack-protector-strong -mstack-protector-guard=sysreg -mstack-protector-guard-reg=tpidr2_el0 -mstack-protector-guard-offset=16" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++void g(void *);
++
++/*
++** test1:
++** sub sp, sp, #288
++** stp x29, x30, \[sp, #?272\]
++** add x29, sp, #?272
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?264\]
++** mov \2, *0
++** add x0, sp, #?8
++** bl g
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** ldp x29, x30, \[sp, #?272\]
++** add sp, sp, #?288
++** ret
++** bl __stack_chk_fail
++*/
++int test1() {
++ int y[0x40];
++ g(y);
++ return 1;
++}
++
++/*
++** test2:
++** stp x29, x30, \[sp, #?-16\]!
++** mov x29, sp
++** sub sp, sp, #1040
++** mrs (x[0-9]+), tpidr2_el0
++** ldr (x[0-9]+), \[\1, #?16\]
++** str \2, \[sp, #?1032\]
++** mov \2, *0
++** add x0, sp, #?8
++** bl g
++** ...
++** mrs .*
++** ...
++** bne .*
++** ...
++** add sp, sp, #?1040
++** ldp x29, x30, \[sp\], #?16
++** ret
++** bl __stack_chk_fail
++*/
++int test2() {
++ int y[0x100];
++ g(y);
++ return 1;
++}
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c b/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+new file mode 100644
+index 00000000000..58f322aa480
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
+@@ -0,0 +1,33 @@
++/* { dg-options "-O2 -mcpu=neoverse-v1 -fstack-protector-all" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++/*
++** main:
++** ...
++** stp x29, x30, \[sp, #?-[0-9]+\]!
++** ...
++** sub sp, sp, #[0-9]+
++** ...
++** str x[0-9]+, \[x29, #?-8\]
++** ...
++*/
++int f(const char *);
++void g(void *);
++int main(int argc, char* argv[])
++{
++ int a;
++ int b;
++ char c[2+f(argv[1])];
++ int d[0x100];
++ char y;
++
++ y=42; a=4; b=10;
++ c[0] = 'h'; c[1] = '\0';
++
++ c[f(argv[2])] = '\0';
++
++ __builtin_printf("%d %d\n%s\n", a, b, c);
++ g(d);
++
++ return 0;
++}
+--
+2.34.1
+
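As a reader's aid, a minimal C sketch of the code pattern CVE-2023-4039 is about; it is not taken from the patch series, and the function name and sizes are invented. Before this series, on AArch64 an overflow of a dynamically sized local could reach the saved LR/FP without crossing the stack-smash canary, so -fstack-protector-strong would not catch it.

    #include <string.h>

    /* Illustrative only: buf is a dynamic (VLA) allocation, which the old
       frame layout placed directly below the saved registers.  */
    void copy_input (const char *src, int n)
    {
      char buf[n];
      memcpy (buf, src, (size_t) n + 64);   /* deliberate overflow of buf */
    }
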
diff --git a/meta/recipes-devtools/gcc/gcc-common.inc b/meta/recipes-devtools/gcc/gcc-common.inc
index 629fa26dfe..69a3536965 100644
--- a/meta/recipes-devtools/gcc/gcc-common.inc
+++ b/meta/recipes-devtools/gcc/gcc-common.inc
@@ -100,7 +100,7 @@ BINV = "${PV}"
#S = "${WORKDIR}/gcc-${PV}"
S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${PV}"
-B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
+B ?= "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}"
target_includedir ?= "${includedir}"
target_libdir ?= "${libdir}"
diff --git a/meta/recipes-devtools/gcc/gcc-cross-canadian_9.3.bb b/meta/recipes-devtools/gcc/gcc-cross-canadian_9.5.bb
index bf53c5cd78..bf53c5cd78 100644
--- a/meta/recipes-devtools/gcc/gcc-cross-canadian_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-cross-canadian_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-cross_9.3.bb b/meta/recipes-devtools/gcc/gcc-cross_9.5.bb
index b43cca0c52..b43cca0c52 100644
--- a/meta/recipes-devtools/gcc/gcc-cross_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-cross_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-crosssdk_9.3.bb b/meta/recipes-devtools/gcc/gcc-crosssdk_9.5.bb
index 40a6c4feff..40a6c4feff 100644
--- a/meta/recipes-devtools/gcc/gcc-crosssdk_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-crosssdk_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-runtime_9.3.bb b/meta/recipes-devtools/gcc/gcc-runtime_9.5.bb
index dd430b57eb..dd430b57eb 100644
--- a/meta/recipes-devtools/gcc/gcc-runtime_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-runtime_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-sanitizers_9.3.bb b/meta/recipes-devtools/gcc/gcc-sanitizers_9.5.bb
index f3c7058114..f3c7058114 100644
--- a/meta/recipes-devtools/gcc/gcc-sanitizers_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-sanitizers_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc-shared-source.inc b/meta/recipes-devtools/gcc/gcc-shared-source.inc
index aac4b49313..4baf7874d2 100644
--- a/meta/recipes-devtools/gcc/gcc-shared-source.inc
+++ b/meta/recipes-devtools/gcc/gcc-shared-source.inc
@@ -9,3 +9,6 @@ SRC_URI = ""
do_configure[depends] += "gcc-source-${PV}:do_preconfigure"
do_populate_lic[depends] += "gcc-source-${PV}:do_unpack"
+
+# patch is available via gcc-source recipe
+CVE_CHECK_WHITELIST += "CVE-2023-4039"
diff --git a/meta/recipes-devtools/gcc/gcc-source.inc b/meta/recipes-devtools/gcc/gcc-source.inc
index 03bab97815..224b7778ef 100644
--- a/meta/recipes-devtools/gcc/gcc-source.inc
+++ b/meta/recipes-devtools/gcc/gcc-source.inc
@@ -18,6 +18,7 @@ INHIBIT_DEFAULT_DEPS = "1"
DEPENDS = ""
PACKAGES = ""
+B = "${WORKDIR}/build"
# This needs to be Python to avoid lots of shell variables becoming dependencies.
python do_preconfigure () {
diff --git a/meta/recipes-devtools/gcc/gcc-source_9.3.bb b/meta/recipes-devtools/gcc/gcc-source_9.5.bb
index b890fa33ea..b890fa33ea 100644
--- a/meta/recipes-devtools/gcc/gcc-source_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc-source_9.5.bb
diff --git a/meta/recipes-devtools/gcc/gcc_9.3.bb b/meta/recipes-devtools/gcc/gcc_9.5.bb
index 7d93590588..7d93590588 100644
--- a/meta/recipes-devtools/gcc/gcc_9.3.bb
+++ b/meta/recipes-devtools/gcc/gcc_9.5.bb
diff --git a/meta/recipes-devtools/gcc/libgcc-initial_9.3.bb b/meta/recipes-devtools/gcc/libgcc-initial_9.5.bb
index 0c698c26ec..0c698c26ec 100644
--- a/meta/recipes-devtools/gcc/libgcc-initial_9.3.bb
+++ b/meta/recipes-devtools/gcc/libgcc-initial_9.5.bb
diff --git a/meta/recipes-devtools/gcc/libgcc_9.3.bb b/meta/recipes-devtools/gcc/libgcc_9.5.bb
index ea210a1130..ea210a1130 100644
--- a/meta/recipes-devtools/gcc/libgcc_9.3.bb
+++ b/meta/recipes-devtools/gcc/libgcc_9.5.bb
diff --git a/meta/recipes-devtools/gcc/libgfortran_9.3.bb b/meta/recipes-devtools/gcc/libgfortran_9.5.bb
index 71dd8b4bdc..71dd8b4bdc 100644
--- a/meta/recipes-devtools/gcc/libgfortran_9.3.bb
+++ b/meta/recipes-devtools/gcc/libgfortran_9.5.bb
diff --git a/meta/recipes-devtools/gdb/gdb-9.1.inc b/meta/recipes-devtools/gdb/gdb-9.1.inc
index d019e6b384..212c554cf1 100644
--- a/meta/recipes-devtools/gdb/gdb-9.1.inc
+++ b/meta/recipes-devtools/gdb/gdb-9.1.inc
@@ -16,6 +16,7 @@ SRC_URI = "${GNU_MIRROR}/gdb/gdb-${PV}.tar.xz \
file://0009-resolve-restrict-keyword-conflict.patch \
file://0010-Fix-invalid-sigprocmask-call.patch \
file://0011-gdbserver-ctrl-c-handling.patch \
+ file://0012-CVE-2023-39128.patch \
"
SRC_URI[md5sum] = "f7e9f6236c425097d9e5f18a6ac40655"
SRC_URI[sha256sum] = "699e0ec832fdd2f21c8266171ea5bf44024bd05164fdf064e4d10cc4cf0d1737"
diff --git a/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39128.patch b/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39128.patch
new file mode 100644
index 0000000000..6445455bde
--- /dev/null
+++ b/meta/recipes-devtools/gdb/gdb/0012-CVE-2023-39128.patch
@@ -0,0 +1,75 @@
+From 033bc52bb6190393c8eed80925fa78cc35b40c6d Mon Sep 17 00:00:00 2001
+From: Tom Tromey <tromey@adacore.com>
+Date: Wed, 16 Aug 2023 11:29:19 -0600
+Subject: [PATCH] Avoid buffer overflow in ada_decode
+
+A bug report pointed out a buffer overflow in ada_decode, which Keith
+helpfully analyzed. ada_decode had a logic error when the input was
+all digits. While this isn't valid -- and would probably only appear
+in fuzzer tests -- it still should be handled properly.
+
+This patch adds a missing bounds check. Tested with the self-tests in
+an asan build.
+
+Bug: https://sourceware.org/bugzilla/show_bug.cgi?id=30639
+Reviewed-by: Keith Seitz <keiths@redhat.com>
+
+Upstream-Status: Backport from [https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=033bc52bb6190393c8eed80925fa78cc35b40c6d]
+CVE: CVE-2023-39128
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ gdb/ada-lang.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/gdb/ada-lang.c b/gdb/ada-lang.c
+index 0c2d4fc..40852b6 100644
+--- a/gdb/ada-lang.c
++++ b/gdb/ada-lang.c
+@@ -56,6 +56,7 @@
+ #include "cli/cli-utils.h"
+ #include "gdbsupport/function-view.h"
+ #include "gdbsupport/byte-vector.h"
++#include "gdbsupport/selftest.h"
+ #include <algorithm>
+
+ /* Define whether or not the C operator '/' truncates towards zero for
+@@ -1184,7 +1185,7 @@ ada_decode (const char *encoded)
+ i -= 1;
+ if (i > 1 && encoded[i] == '_' && encoded[i - 1] == '_')
+ len0 = i - 1;
+- else if (encoded[i] == '$')
++ else if (i >= 0 && encoded[i] == '$')
+ len0 = i;
+ }
+
+@@ -1350,6 +1351,18 @@ Suppress:
+
+ }
+
++#ifdef GDB_SELF_TEST
++
++static void
++ada_decode_tests ()
++{
++ /* This isn't valid, but used to cause a crash. PR gdb/30639. The
++ result does not really matter very much. */
++ SELF_CHECK (ada_decode ("44") == "44");
++}
++
++#endif
++
+ /* Table for keeping permanent unique copies of decoded names. Once
+ allocated, names in this table are never released. While this is a
+ storage leak, it should not be significant unless there are massive
+@@ -14345,4 +14358,8 @@ DWARF attribute."),
+ gdb::observers::new_objfile.attach (ada_new_objfile_observer);
+ gdb::observers::free_objfile.attach (ada_free_objfile_observer);
+ gdb::observers::inferior_exit.attach (ada_inferior_exit);
++
++#ifdef GDB_SELF_TEST
++ selftests::register_test ("ada-decode", ada_decode_tests);
++#endif
+ }
+--
+2.24.4
+
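A simplified model of the defect fixed above; this is not the gdb code, and the helper is hypothetical. The point is that an index walking backwards over trailing digits can end up at -1 for an all-digit input, and without an explicit i >= 0 guard the following subscript reads one byte before the buffer.

    #include <ctype.h>
    #include <string.h>

    /* Hypothetical sketch mirroring the shape of the fix, which adds the
       "i >= 0" check before encoded[i] is used.  */
    static int suffix_start (const char *encoded)
    {
      int i = (int) strlen (encoded);
      while (i > 0 && isdigit ((unsigned char) encoded[i - 1]))
        i -= 1;
      i -= 1;                              /* may now be -1 for "44" etc. */
      if (i >= 0 && encoded[i] == '$')     /* the added bounds check */
        return i;
      return -1;
    }
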
diff --git a/meta/recipes-devtools/git/files/CVE-2022-23521.patch b/meta/recipes-devtools/git/files/CVE-2022-23521.patch
new file mode 100644
index 0000000000..974546013d
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-23521.patch
@@ -0,0 +1,367 @@
+From eb22e7dfa23da6bd9aed9bd1dad69e1e8e167d24 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:45:15 +0100
+Subject: [PATCH] CVE-2022-23521
+
+attr: fix overflow when upserting attribute with overly long name
+
+The function `git_attr_internal()` is called to upsert attributes into
+the global map. And while all callers pass a `size_t`, the function
+itself accepts an `int` as the attribute name's length. This can lead to
+an integer overflow in case the attribute name is longer than `INT_MAX`.
+
+Now this overflow seems harmless as the first thing we do is to call
+`attr_name_valid()`, and that function only succeeds in case all chars
+in the range of `namelen` match a certain small set of chars. We thus
+can't do an out-of-bounds read as NUL is not part of that set and all
+strings passed to this function are NUL-terminated. And furthermore, we
+wouldn't ever read past the current attribute name anyway due to the
+same reason. And if validation fails we will return early.
+
+On the other hand it feels fragile to rely on this behaviour, even more
+so given that we pass `namelen` to `FLEX_ALLOC_MEM()`. So let's instead
+just do the correct thing here and accept a `size_t` as line length.
+
+Upstream-Status: Backport [https://github.com/git/git/commit/eb22e7dfa23da6bd9aed9bd1dad69e1e8e167d24 &https://github.com/git/git/commit/8d0d48cf2157cfb914db1f53b3fe40785b86f3aa & https://github.com/git/git/commit/24557209500e6ed618f04a8795a111a0c491a29c & https://github.com/git/git/commit/34ace8bad02bb14ecc5b631f7e3daaa7a9bba7d9 & https://github.com/git/git/commit/447ac906e189535e77dcb1f4bbe3f1bc917d4c12 & https://github.com/git/git/commit/e1e12e97ac73ded85f7d000da1063a774b3cc14f & https://github.com/git/git/commit/a60a66e409c265b2944f18bf43581c146812586d & https://github.com/git/git/commit/d74b1fd54fdbc45966d12ea907dece11e072fb2b & https://github.com/git/git/commit/dfa6b32b5e599d97448337ed4fc18dd50c90758f & https://github.com/git/git/commit/3c50032ff5289cc45659f21949c8d09e52164579
+
+CVE: CVE-2022-23521
+
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ attr.c | 97 +++++++++++++++++++++++++++----------------
+ attr.h | 12 ++++++
+ t/t0003-attributes.sh | 59 ++++++++++++++++++++++++++
+ 3 files changed, 132 insertions(+), 36 deletions(-)
+
+diff --git a/attr.c b/attr.c
+index 11f19b5..63484ab 100644
+--- a/attr.c
++++ b/attr.c
+@@ -29,7 +29,7 @@ static const char git_attr__unknown[] = "(builtin)unknown";
+ #endif
+
+ struct git_attr {
+- int attr_nr; /* unique attribute number */
++ unsigned int attr_nr; /* unique attribute number */
+ char name[FLEX_ARRAY]; /* attribute name */
+ };
+
+@@ -221,7 +221,7 @@ static void report_invalid_attr(const char *name, size_t len,
+ * dictionary. If no entry is found, create a new attribute and store it in
+ * the dictionary.
+ */
+-static const struct git_attr *git_attr_internal(const char *name, int namelen)
++static const struct git_attr *git_attr_internal(const char *name, size_t namelen)
+ {
+ struct git_attr *a;
+
+@@ -237,8 +237,8 @@ static const struct git_attr *git_attr_internal(const char *name, int namelen)
+ a->attr_nr = hashmap_get_size(&g_attr_hashmap.map);
+
+ attr_hashmap_add(&g_attr_hashmap, a->name, namelen, a);
+- assert(a->attr_nr ==
+- (hashmap_get_size(&g_attr_hashmap.map) - 1));
++ if (a->attr_nr != hashmap_get_size(&g_attr_hashmap.map) - 1)
++ die(_("unable to add additional attribute"));
+ }
+
+ hashmap_unlock(&g_attr_hashmap);
+@@ -283,7 +283,7 @@ struct match_attr {
+ const struct git_attr *attr;
+ } u;
+ char is_macro;
+- unsigned num_attr;
++ size_t num_attr;
+ struct attr_state state[FLEX_ARRAY];
+ };
+
+@@ -300,7 +300,7 @@ static const char *parse_attr(const char *src, int lineno, const char *cp,
+ struct attr_state *e)
+ {
+ const char *ep, *equals;
+- int len;
++ size_t len;
+
+ ep = cp + strcspn(cp, blank);
+ equals = strchr(cp, '=');
+@@ -344,8 +344,7 @@ static const char *parse_attr(const char *src, int lineno, const char *cp,
+ static struct match_attr *parse_attr_line(const char *line, const char *src,
+ int lineno, int macro_ok)
+ {
+- int namelen;
+- int num_attr, i;
++ size_t namelen, num_attr, i;
+ const char *cp, *name, *states;
+ struct match_attr *res = NULL;
+ int is_macro;
+@@ -356,6 +355,11 @@ static struct match_attr *parse_attr_line(const char *line, const char *src,
+ return NULL;
+ name = cp;
+
++ if (strlen(line) >= ATTR_MAX_LINE_LENGTH) {
++ warning(_("ignoring overly long attributes line %d"), lineno);
++ return NULL;
++ }
++
+ if (*cp == '"' && !unquote_c_style(&pattern, name, &states)) {
+ name = pattern.buf;
+ namelen = pattern.len;
+@@ -392,10 +396,9 @@ static struct match_attr *parse_attr_line(const char *line, const char *src,
+ goto fail_return;
+ }
+
+- res = xcalloc(1,
+- sizeof(*res) +
+- sizeof(struct attr_state) * num_attr +
+- (is_macro ? 0 : namelen + 1));
++ res = xcalloc(1, st_add3(sizeof(*res),
++ st_mult(sizeof(struct attr_state), num_attr),
++ is_macro ? 0 : namelen + 1));
+ if (is_macro) {
+ res->u.attr = git_attr_internal(name, namelen);
+ } else {
+@@ -458,11 +461,12 @@ struct attr_stack {
+
+ static void attr_stack_free(struct attr_stack *e)
+ {
+- int i;
++ unsigned i;
+ free(e->origin);
+ for (i = 0; i < e->num_matches; i++) {
+ struct match_attr *a = e->attrs[i];
+- int j;
++ size_t j;
++
+ for (j = 0; j < a->num_attr; j++) {
+ const char *setto = a->state[j].setto;
+ if (setto == ATTR__TRUE ||
+@@ -671,8 +675,8 @@ static void handle_attr_line(struct attr_stack *res,
+ a = parse_attr_line(line, src, lineno, macro_ok);
+ if (!a)
+ return;
+- ALLOC_GROW(res->attrs, res->num_matches + 1, res->alloc);
+- res->attrs[res->num_matches++] = a;
++ ALLOC_GROW_BY(res->attrs, res->num_matches, 1, res->alloc);
++ res->attrs[res->num_matches - 1] = a;
+ }
+
+ static struct attr_stack *read_attr_from_array(const char **list)
+@@ -711,21 +715,37 @@ void git_attr_set_direction(enum git_attr_direction new_direction)
+
+ static struct attr_stack *read_attr_from_file(const char *path, int macro_ok)
+ {
++ struct strbuf buf = STRBUF_INIT;
+ FILE *fp = fopen_or_warn(path, "r");
+ struct attr_stack *res;
+- char buf[2048];
+ int lineno = 0;
++ int fd;
++ struct stat st;
+
+ if (!fp)
+ return NULL;
+- res = xcalloc(1, sizeof(*res));
+- while (fgets(buf, sizeof(buf), fp)) {
+- char *bufp = buf;
+- if (!lineno)
+- skip_utf8_bom(&bufp, strlen(bufp));
+- handle_attr_line(res, bufp, path, ++lineno, macro_ok);
++
++ fd = fileno(fp);
++ if (fstat(fd, &st)) {
++ warning_errno(_("cannot fstat gitattributes file '%s'"), path);
++ fclose(fp);
++ return NULL;
+ }
++ if (st.st_size >= ATTR_MAX_FILE_SIZE) {
++ warning(_("ignoring overly large gitattributes file '%s'"), path);
++ fclose(fp);
++ return NULL;
++ }
++
++ CALLOC_ARRAY(res, 1);
++ while (strbuf_getline(&buf, fp) != EOF) {
++ if (!lineno && starts_with(buf.buf, utf8_bom))
++ strbuf_remove(&buf, 0, strlen(utf8_bom));
++ handle_attr_line(res, buf.buf, path, ++lineno, macro_ok);
++ }
++
+ fclose(fp);
++ strbuf_release(&buf);
+ return res;
+ }
+
+@@ -736,13 +756,18 @@ static struct attr_stack *read_attr_from_index(const struct index_state *istate,
+ struct attr_stack *res;
+ char *buf, *sp;
+ int lineno = 0;
++ size_t size;
+
+ if (!istate)
+ return NULL;
+
+- buf = read_blob_data_from_index(istate, path, NULL);
++ buf = read_blob_data_from_index(istate, path, &size);
+ if (!buf)
+ return NULL;
++ if (size >= ATTR_MAX_FILE_SIZE) {
++ warning(_("ignoring overly large gitattributes blob '%s'"), path);
++ return NULL;
++ }
+
+ res = xcalloc(1, sizeof(*res));
+ for (sp = buf; *sp; ) {
+@@ -1012,12 +1037,12 @@ static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem);
+ static int fill_one(const char *what, struct all_attrs_item *all_attrs,
+ const struct match_attr *a, int rem)
+ {
+- int i;
++ size_t i;
+
+- for (i = a->num_attr - 1; rem > 0 && i >= 0; i--) {
+- const struct git_attr *attr = a->state[i].attr;
++ for (i = a->num_attr; rem > 0 && i > 0; i--) {
++ const struct git_attr *attr = a->state[i - 1].attr;
+ const char **n = &(all_attrs[attr->attr_nr].value);
+- const char *v = a->state[i].setto;
++ const char *v = a->state[i - 1].setto;
+
+ if (*n == ATTR__UNKNOWN) {
+ debug_set(what,
+@@ -1036,11 +1061,11 @@ static int fill(const char *path, int pathlen, int basename_offset,
+ struct all_attrs_item *all_attrs, int rem)
+ {
+ for (; rem > 0 && stack; stack = stack->prev) {
+- int i;
++ unsigned i;
+ const char *base = stack->origin ? stack->origin : "";
+
+- for (i = stack->num_matches - 1; 0 < rem && 0 <= i; i--) {
+- const struct match_attr *a = stack->attrs[i];
++ for (i = stack->num_matches; 0 < rem && 0 < i; i--) {
++ const struct match_attr *a = stack->attrs[i - 1];
+ if (a->is_macro)
+ continue;
+ if (path_matches(path, pathlen, basename_offset,
+@@ -1071,11 +1096,11 @@ static void determine_macros(struct all_attrs_item *all_attrs,
+ const struct attr_stack *stack)
+ {
+ for (; stack; stack = stack->prev) {
+- int i;
+- for (i = stack->num_matches - 1; i >= 0; i--) {
+- const struct match_attr *ma = stack->attrs[i];
++ unsigned i;
++ for (i = stack->num_matches; i > 0; i--) {
++ const struct match_attr *ma = stack->attrs[i - 1];
+ if (ma->is_macro) {
+- int n = ma->u.attr->attr_nr;
++ unsigned int n = ma->u.attr->attr_nr;
+ if (!all_attrs[n].macro) {
+ all_attrs[n].macro = ma;
+ }
+@@ -1127,7 +1152,7 @@ void git_check_attr(const struct index_state *istate,
+ collect_some_attrs(istate, path, check);
+
+ for (i = 0; i < check->nr; i++) {
+- size_t n = check->items[i].attr->attr_nr;
++ unsigned int n = check->items[i].attr->attr_nr;
+ const char *value = check->all_attrs[n].value;
+ if (value == ATTR__UNKNOWN)
+ value = ATTR__UNSET;
+diff --git a/attr.h b/attr.h
+index b0378bf..f424285 100644
+--- a/attr.h
++++ b/attr.h
+@@ -1,6 +1,18 @@
+ #ifndef ATTR_H
+ #define ATTR_H
+
++/**
++ * The maximum line length for a gitattributes file. If the line exceeds this
++ * length we will ignore it.
++ */
++#define ATTR_MAX_LINE_LENGTH 2048
++
++ /**
++ * The maximum size of the giattributes file. If the file exceeds this size we
++ * will ignore it.
++ */
++#define ATTR_MAX_FILE_SIZE (100 * 1024 * 1024)
++
+ struct index_state;
+
+ /* An attribute is a pointer to this opaque structure */
+diff --git a/t/t0003-attributes.sh b/t/t0003-attributes.sh
+index 71e63d8..556245b 100755
+--- a/t/t0003-attributes.sh
++++ b/t/t0003-attributes.sh
+@@ -342,4 +342,63 @@ test_expect_success 'query binary macro directly' '
+ test_cmp expect actual
+ '
+
++test_expect_success 'large attributes line ignored in tree' '
++ test_when_finished "rm .gitattributes" &&
++ printf "path %02043d" 1 >.gitattributes &&
++ git check-attr --all path >actual 2>err &&
++ echo "warning: ignoring overly long attributes line 1" >expect &&
++ test_cmp expect err &&
++ test_must_be_empty actual
++'
++
++test_expect_success 'large attributes line ignores trailing content in tree' '
++ test_when_finished "rm .gitattributes" &&
++ # older versions of Git broke lines at 2048 bytes; the 2045 bytes
++ # of 0-padding here is accounting for the three bytes of "a 1", which
++ # would knock "trailing" to the "next" line, where it would be
++ # erroneously parsed.
++ printf "a %02045dtrailing attribute\n" 1 >.gitattributes &&
++ git check-attr --all trailing >actual 2>err &&
++ echo "warning: ignoring overly long attributes line 1" >expect &&
++ test_cmp expect err &&
++ test_must_be_empty actual
++'
++
++test_expect_success EXPENSIVE 'large attributes file ignored in tree' '
++ test_when_finished "rm .gitattributes" &&
++ dd if=/dev/zero of=.gitattributes bs=101M count=1 2>/dev/null &&
++ git check-attr --all path >/dev/null 2>err &&
++ echo "warning: ignoring overly large gitattributes file ${SQ}.gitattributes${SQ}" >expect &&
++ test_cmp expect err
++'
++
++test_expect_success 'large attributes line ignored in index' '
++ test_when_finished "git update-index --remove .gitattributes" &&
++ blob=$(printf "path %02043d" 1 | git hash-object -w --stdin) &&
++ git update-index --add --cacheinfo 100644,$blob,.gitattributes &&
++ git check-attr --cached --all path >actual 2>err &&
++ echo "warning: ignoring overly long attributes line 1" >expect &&
++ test_cmp expect err &&
++ test_must_be_empty actual
++'
++
++test_expect_success 'large attributes line ignores trailing content in index' '
++ test_when_finished "git update-index --remove .gitattributes" &&
++ blob=$(printf "a %02045dtrailing attribute\n" 1 | git hash-object -w --stdin) &&
++ git update-index --add --cacheinfo 100644,$blob,.gitattributes &&
++ git check-attr --cached --all trailing >actual 2>err &&
++ echo "warning: ignoring overly long attributes line 1" >expect &&
++ test_cmp expect err &&
++ test_must_be_empty actual
++'
++
++test_expect_success EXPENSIVE 'large attributes file ignored in index' '
++ test_when_finished "git update-index --remove .gitattributes" &&
++ blob=$(dd if=/dev/zero bs=101M count=1 2>/dev/null | git hash-object -w --stdin) &&
++ git update-index --add --cacheinfo 100644,$blob,.gitattributes &&
++ git check-attr --cached --all path >/dev/null 2>err &&
++ echo "warning: ignoring overly large gitattributes blob ${SQ}.gitattributes${SQ}" >expect &&
++ test_cmp expect err
++'
++
+ test_done
+--
+2.25.1
+
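A short, self-contained illustration of the overflow class removed above; this is not git's code, and the function and values are made up. A size_t length narrowed into an int parameter wraps once it exceeds INT_MAX, so everything downstream (validation, allocation sizing) sees a bogus length.

    #include <limits.h>
    #include <stdio.h>

    /* Hypothetical callee that takes the length as int, as attr.c used to.  */
    static void add_attr (const char *name, int namelen)
    {
      (void) name;
      printf ("callee sees namelen = %d\n", namelen);
    }

    int main (void)
    {
      size_t huge = (size_t) INT_MAX + 2;  /* e.g. an attribute name > 2 GiB */
      add_attr ("x", (int) huge);          /* typically wraps to a negative value */
      return 0;
    }
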
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-01.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-01.patch
new file mode 100644
index 0000000000..87091abd47
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-01.patch
@@ -0,0 +1,39 @@
+From a244dc5b0a629290881641467c7a545de7508ab2 Mon Sep 17 00:00:00 2001
+From: Carlo Marcelo Arenas Belón <carenas@gmail.com>
+Date: Tue, 2 Nov 2021 15:46:06 +0000
+Subject: [PATCH 01/12] test-lib: add prerequisite for 64-bit platforms
+
+Allow tests that assume a 64-bit `size_t` to be skipped in 32-bit
+platforms and regardless of the size of `long`.
+
+This imitates the `LONG_IS_64BIT` prerequisite.
+
+Signed-off-by: Carlo Marcelo Arenas Belón <carenas@gmail.com>
+Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/a244dc5b0a629290881641467c7a545de7508ab2]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/test-lib.sh | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/t/test-lib.sh b/t/test-lib.sh
+index e06fa02..db5ec2f 100644
+--- a/t/test-lib.sh
++++ b/t/test-lib.sh
+@@ -1613,6 +1613,10 @@ build_option () {
+ sed -ne "s/^$1: //p"
+ }
+
++test_lazy_prereq SIZE_T_IS_64BIT '
++ test 8 -eq "$(build_option sizeof-size_t)"
++'
++
+ test_lazy_prereq LONG_IS_64BIT '
+ test 8 -le "$(build_option sizeof-long)"
+ '
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-02.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-02.patch
new file mode 100644
index 0000000000..f35e55b585
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-02.patch
@@ -0,0 +1,187 @@
+From 81dc898df9b4b4035534a927f3234a3839b698bf Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:25 +0100
+Subject: [PATCH 02/12] pretty: fix out-of-bounds write caused by integer overflow
+
+When using a padding specifier in the pretty format passed to git-log(1)
+we need to calculate the string length in several places. These string
+lengths are stored in `int`s though, which means that these can easily
+overflow when the input lengths exceed 2GB. This can ultimately lead to
+an out-of-bounds write when these are used in a call to memcpy(3P):
+
+ ==8340==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x7f1ec62f97fe at pc 0x7f2127e5f427 bp 0x7ffd3bd63de0 sp 0x7ffd3bd63588
+ WRITE of size 1 at 0x7f1ec62f97fe thread T0
+ #0 0x7f2127e5f426 in __interceptor_memcpy /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:827
+ #1 0x5628e96aa605 in format_and_pad_commit pretty.c:1762
+ #2 0x5628e96aa7f4 in format_commit_item pretty.c:1801
+ #3 0x5628e97cdb24 in strbuf_expand strbuf.c:429
+ #4 0x5628e96ab060 in repo_format_commit_message pretty.c:1869
+ #5 0x5628e96acd0f in pretty_print_commit pretty.c:2161
+ #6 0x5628e95a44c8 in show_log log-tree.c:781
+ #7 0x5628e95a76ba in log_tree_commit log-tree.c:1117
+ #8 0x5628e922bed5 in cmd_log_walk_no_free builtin/log.c:508
+ #9 0x5628e922c35b in cmd_log_walk builtin/log.c:549
+ #10 0x5628e922f1a2 in cmd_log builtin/log.c:883
+ #11 0x5628e9106993 in run_builtin git.c:466
+ #12 0x5628e9107397 in handle_builtin git.c:721
+ #13 0x5628e9107b07 in run_argv git.c:788
+ #14 0x5628e91088a7 in cmd_main git.c:923
+ #15 0x5628e939d682 in main common-main.c:57
+ #16 0x7f2127c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #17 0x7f2127c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #18 0x5628e91020e4 in _start ../sysdeps/x86_64/start.S:115
+
+ 0x7f1ec62f97fe is located 2 bytes to the left of 4831838265-byte region [0x7f1ec62f9800,0x7f1fe62f9839)
+ allocated by thread T0 here:
+ #0 0x7f2127ebe7ea in __interceptor_realloc /usr/src/debug/gcc/libsanitizer/asan/asan_malloc_linux.cpp:85
+ #1 0x5628e98774d4 in xrealloc wrapper.c:136
+ #2 0x5628e97cb01c in strbuf_grow strbuf.c:99
+ #3 0x5628e97ccd42 in strbuf_addchars strbuf.c:327
+ #4 0x5628e96aa55c in format_and_pad_commit pretty.c:1761
+ #5 0x5628e96aa7f4 in format_commit_item pretty.c:1801
+ #6 0x5628e97cdb24 in strbuf_expand strbuf.c:429
+ #7 0x5628e96ab060 in repo_format_commit_message pretty.c:1869
+ #8 0x5628e96acd0f in pretty_print_commit pretty.c:2161
+ #9 0x5628e95a44c8 in show_log log-tree.c:781
+ #10 0x5628e95a76ba in log_tree_commit log-tree.c:1117
+ #11 0x5628e922bed5 in cmd_log_walk_no_free builtin/log.c:508
+ #12 0x5628e922c35b in cmd_log_walk builtin/log.c:549
+ #13 0x5628e922f1a2 in cmd_log builtin/log.c:883
+ #14 0x5628e9106993 in run_builtin git.c:466
+ #15 0x5628e9107397 in handle_builtin git.c:721
+ #16 0x5628e9107b07 in run_argv git.c:788
+ #17 0x5628e91088a7 in cmd_main git.c:923
+ #18 0x5628e939d682 in main common-main.c:57
+ #19 0x7f2127c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #20 0x7f2127c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #21 0x5628e91020e4 in _start ../sysdeps/x86_64/start.S:115
+
+ SUMMARY: AddressSanitizer: heap-buffer-overflow /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:827 in __interceptor_memcpy
+ Shadow bytes around the buggy address:
+ 0x0fe458c572a0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0fe458c572b0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0fe458c572c0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0fe458c572d0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0fe458c572e0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ =>0x0fe458c572f0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa[fa]
+ 0x0fe458c57300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 0x0fe458c57310: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 0x0fe458c57320: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 0x0fe458c57330: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 0x0fe458c57340: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ Shadow byte legend (one shadow byte represents 8 application bytes):
+ Addressable: 00
+ Partially addressable: 01 02 03 04 05 06 07
+ Heap left redzone: fa
+ Freed heap region: fd
+ Stack left redzone: f1
+ Stack mid redzone: f2
+ Stack right redzone: f3
+ Stack after return: f5
+ Stack use after scope: f8
+ Global redzone: f9
+ Global init order: f6
+ Poisoned by user: f7
+ Container overflow: fc
+ Array cookie: ac
+ Intra object redzone: bb
+ ASan internal: fe
+ Left alloca redzone: ca
+ Right alloca redzone: cb
+ ==8340==ABORTING
+
+The pretty format can also be used in `git archive` operations via the
+`export-subst` attribute. So this is what in our opinion makes this a
+critical issue in the context of Git forges which allow downloading an
+archive of user supplied Git repositories.
+
+Fix this vulnerability by using `size_t` instead of `int` to track the
+string lengths. Add tests which detect this vulnerability when Git is
+compiled with the address sanitizer.
+
+Reported-by: Joern Schneeweisz <jschneeweisz@gitlab.com>
+Original-patch-by: Joern Schneeweisz <jschneeweisz@gitlab.com>
+Modified-by: Taylor Blau <me@ttalorr.com>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/81dc898df9b4b4035534a927f3234a3839b698bf]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 11 ++++++-----
+ t/t4205-log-pretty-formats.sh | 17 +++++++++++++++++
+ 2 files changed, 23 insertions(+), 5 deletions(-)
+
+diff --git a/pretty.c b/pretty.c
+index b32f036..637e344 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1427,7 +1427,9 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ struct format_commit_context *c)
+ {
+ struct strbuf local_sb = STRBUF_INIT;
+- int total_consumed = 0, len, padding = c->padding;
++ size_t total_consumed = 0;
++ int len, padding = c->padding;
++
+ if (padding < 0) {
+ const char *start = strrchr(sb->buf, '\n');
+ int occupied;
+@@ -1439,7 +1441,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ }
+ while (1) {
+ int modifier = *placeholder == 'C';
+- int consumed = format_commit_one(&local_sb, placeholder, c);
++ size_t consumed = format_commit_one(&local_sb, placeholder, c);
+ total_consumed += consumed;
+
+ if (!modifier)
+@@ -1505,7 +1507,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ }
+ strbuf_addbuf(sb, &local_sb);
+ } else {
+- int sb_len = sb->len, offset = 0;
++ size_t sb_len = sb->len, offset = 0;
+ if (c->flush_type == flush_left)
+ offset = padding - len;
+ else if (c->flush_type == flush_both)
+@@ -1528,8 +1530,7 @@ static size_t format_commit_item(struct strbuf *sb, /* in UTF-8 */
+ const char *placeholder,
+ void *context)
+ {
+- int consumed;
+- size_t orig_len;
++ size_t consumed, orig_len;
+ enum {
+ NO_MAGIC,
+ ADD_LF_BEFORE_NON_EMPTY,
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index f42a69f..a2acee1 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -788,4 +788,21 @@ test_expect_success '%S in git log --format works with other placeholders (part
+ test_cmp expect actual
+ '
+
++test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
++ # We only assert that this command does not crash. This needs to be
++ # executed with the address sanitizer to demonstrate failure.
++ git log -1 --pretty="format:%>(2147483646)%x41%41%>(2147483646)%x41" >/dev/null
++'
++
++test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'set up huge commit' '
++ test-tool genzeros 2147483649 | tr "\000" "1" >expect &&
++ huge_commit=$(git commit-tree -F expect HEAD^{tree})
++'
++
++test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
++ git log -1 --format="%B%<(1)%x30" $huge_commit >actual &&
++ echo 0 >>expect &&
++ test_cmp expect actual
++'
++
+ test_done
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-03.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-03.patch
new file mode 100644
index 0000000000..d83d77eaf7
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-03.patch
@@ -0,0 +1,146 @@
+From b49f309aa16febeddb65e82526640a91bbba3be3 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:30 +0100
+Subject: [PATCH 03/12] pretty: fix out-of-bounds read when left-flushing with stealing
+
+With the `%>>(<N>)` pretty formatter, you can ask git-log(1) et al to
+steal spaces. To do so we need to look ahead of the next token to see
+whether there are spaces there. This loop takes into account ANSI
+sequences that end with an `m`, and if it finds any it will skip them
+until it finds the first space. While doing so it does not take into
+account the buffer's limits though and easily does an out-of-bounds
+read.
+
+Add a test that hits this behaviour. While we don't have an easy way to
+verify this, the test causes the following failure when run with
+`SANITIZE=address`:
+
+ ==37941==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x603000000baf at pc 0x55ba6f88e0d0 bp 0x7ffc84c50d20 sp 0x7ffc84c50d10
+ READ of size 1 at 0x603000000baf thread T0
+ #0 0x55ba6f88e0cf in format_and_pad_commit pretty.c:1712
+ #1 0x55ba6f88e7b4 in format_commit_item pretty.c:1801
+ #2 0x55ba6f9b1ae4 in strbuf_expand strbuf.c:429
+ #3 0x55ba6f88f020 in repo_format_commit_message pretty.c:1869
+ #4 0x55ba6f890ccf in pretty_print_commit pretty.c:2161
+ #5 0x55ba6f7884c8 in show_log log-tree.c:781
+ #6 0x55ba6f78b6ba in log_tree_commit log-tree.c:1117
+ #7 0x55ba6f40fed5 in cmd_log_walk_no_free builtin/log.c:508
+ #8 0x55ba6f41035b in cmd_log_walk builtin/log.c:549
+ #9 0x55ba6f4131a2 in cmd_log builtin/log.c:883
+ #10 0x55ba6f2ea993 in run_builtin git.c:466
+ #11 0x55ba6f2eb397 in handle_builtin git.c:721
+ #12 0x55ba6f2ebb07 in run_argv git.c:788
+ #13 0x55ba6f2ec8a7 in cmd_main git.c:923
+ #14 0x55ba6f581682 in main common-main.c:57
+ #15 0x7f2d08c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #16 0x7f2d08c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #17 0x55ba6f2e60e4 in _start ../sysdeps/x86_64/start.S:115
+
+ 0x603000000baf is located 1 bytes to the left of 24-byte region [0x603000000bb0,0x603000000bc8)
+ allocated by thread T0 here:
+ #0 0x7f2d08ebe7ea in __interceptor_realloc /usr/src/debug/gcc/libsanitizer/asan/asan_malloc_linux.cpp:85
+ #1 0x55ba6fa5b494 in xrealloc wrapper.c:136
+ #2 0x55ba6f9aefdc in strbuf_grow strbuf.c:99
+ #3 0x55ba6f9b0a06 in strbuf_add strbuf.c:298
+ #4 0x55ba6f9b1a25 in strbuf_expand strbuf.c:418
+ #5 0x55ba6f88f020 in repo_format_commit_message pretty.c:1869
+ #6 0x55ba6f890ccf in pretty_print_commit pretty.c:2161
+ #7 0x55ba6f7884c8 in show_log log-tree.c:781
+ #8 0x55ba6f78b6ba in log_tree_commit log-tree.c:1117
+ #9 0x55ba6f40fed5 in cmd_log_walk_no_free builtin/log.c:508
+ #10 0x55ba6f41035b in cmd_log_walk builtin/log.c:549
+ #11 0x55ba6f4131a2 in cmd_log builtin/log.c:883
+ #12 0x55ba6f2ea993 in run_builtin git.c:466
+ #13 0x55ba6f2eb397 in handle_builtin git.c:721
+ #14 0x55ba6f2ebb07 in run_argv git.c:788
+ #15 0x55ba6f2ec8a7 in cmd_main git.c:923
+ #16 0x55ba6f581682 in main common-main.c:57
+ #17 0x7f2d08c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #18 0x7f2d08c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #19 0x55ba6f2e60e4 in _start ../sysdeps/x86_64/start.S:115
+
+ SUMMARY: AddressSanitizer: heap-buffer-overflow pretty.c:1712 in format_and_pad_commit
+ Shadow bytes around the buggy address:
+ 0x0c067fff8120: fa fa fd fd fd fa fa fa fd fd fd fa fa fa fd fd
+ 0x0c067fff8130: fd fd fa fa fd fd fd fd fa fa fd fd fd fa fa fa
+ 0x0c067fff8140: fd fd fd fa fa fa fd fd fd fa fa fa fd fd fd fa
+ 0x0c067fff8150: fa fa fd fd fd fd fa fa 00 00 00 fa fa fa fd fd
+ 0x0c067fff8160: fd fa fa fa fd fd fd fa fa fa fd fd fd fa fa fa
+ =>0x0c067fff8170: fd fd fd fa fa[fa]00 00 00 fa fa fa 00 00 00 fa
+ 0x0c067fff8180: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8190: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff81a0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff81b0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff81c0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ Shadow byte legend (one shadow byte represents 8 application bytes):
+ Addressable: 00
+ Partially addressable: 01 02 03 04 05 06 07
+ Heap left redzone: fa
+ Freed heap region: fd
+ Stack left redzone: f1
+ Stack mid redzone: f2
+ Stack right redzone: f3
+ Stack after return: f5
+ Stack use after scope: f8
+ Global redzone: f9
+ Global init order: f6
+ Poisoned by user: f7
+ Container overflow: fc
+ Array cookie: ac
+ Intra object redzone: bb
+ ASan internal: fe
+ Left alloca redzone: ca
+ Right alloca redzone: cb
+
+Luckily enough, this would only cause us to copy the out-of-bounds data
+into the formatted commit in case we really had an ANSI sequence
+preceding our buffer. So this bug likely has no security consequences.
+
+Fix it regardless by not traversing past the buffer's start.
+
+Reported-by: Patrick Steinhardt <ps@pks.im>
+Reported-by: Eric Sesterhenn <eric.sesterhenn@x41-dsec.de>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/b49f309aa16febeddb65e82526640a91bbba3be3]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 2 +-
+ t/t4205-log-pretty-formats.sh | 6 ++++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/pretty.c b/pretty.c
+index 637e344..4348a82 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1468,7 +1468,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ if (*ch != 'm')
+ break;
+ p = ch - 1;
+- while (ch - p < 10 && *p != '\033')
++ while (p > sb->buf && ch - p < 10 && *p != '\033')
+ p--;
+ if (*p != '\033' ||
+ ch + 1 - p != display_mode_esc_sequence_len(p))
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index a2acee1..e69caba 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -788,6 +788,12 @@ test_expect_success '%S in git log --format works with other placeholders (part
+ test_cmp expect actual
+ '
+
++test_expect_success 'log --pretty with space stealing' '
++ printf mm0 >expect &&
++ git log -1 --pretty="format:mm%>>|(1)%x30" >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-04.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-04.patch
new file mode 100644
index 0000000000..9e3c74ff67
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-04.patch
@@ -0,0 +1,150 @@
+From f6e0b9f38987ad5e47bab551f8760b70689a5905 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:34 +0100
+Subject: [PATCH 04/12] pretty: fix out-of-bounds read when parsing invalid padding format
+
+An out-of-bounds read can be triggered when parsing an incomplete
+padding format string passed via `--pretty=format` or in Git archives
+when files are marked with the `export-subst` gitattribute.
+
+This bug exists since we have introduced support for truncating output
+via the `trunc` keyword a7f01c6 (pretty: support truncating in %>, %<
+and %><, 2013-04-19). Before this commit, we used to find the end of the
+formatting string by using strchr(3P). This function returns a `NULL`
+pointer in case the character in question wasn't found. The subsequent
+check whether any character was found thus simply checked the returned
+pointer. After the commit we switched to strcspn(3P) though, which only
+returns the offset to the first found character or to the trailing NUL
+byte. As the end pointer is now computed by adding the offset to the
+start pointer it won't be `NULL` anymore, and as a consequence the check
+doesn't do anything anymore.
+
+The out-of-bounds data that is being read can in fact end up in the
+formatted string. As a consequence, it is possible to leak memory
+contents either by calling git-log(1) or via git-archive(1) when any of
+the archived files is marked with the `export-subst` gitattribute.
+
+ ==10888==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x602000000398 at pc 0x7f0356047cb2 bp 0x7fff3ffb95d0 sp 0x7fff3ffb8d78
+ READ of size 1 at 0x602000000398 thread T0
+ #0 0x7f0356047cb1 in __interceptor_strchrnul /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:725
+ #1 0x563b7cec9a43 in strbuf_expand strbuf.c:417
+ #2 0x563b7cda7060 in repo_format_commit_message pretty.c:1869
+ #3 0x563b7cda8d0f in pretty_print_commit pretty.c:2161
+ #4 0x563b7cca04c8 in show_log log-tree.c:781
+ #5 0x563b7cca36ba in log_tree_commit log-tree.c:1117
+ #6 0x563b7c927ed5 in cmd_log_walk_no_free builtin/log.c:508
+ #7 0x563b7c92835b in cmd_log_walk builtin/log.c:549
+ #8 0x563b7c92b1a2 in cmd_log builtin/log.c:883
+ #9 0x563b7c802993 in run_builtin git.c:466
+ #10 0x563b7c803397 in handle_builtin git.c:721
+ #11 0x563b7c803b07 in run_argv git.c:788
+ #12 0x563b7c8048a7 in cmd_main git.c:923
+ #13 0x563b7ca99682 in main common-main.c:57
+ #14 0x7f0355e3c28f (/usr/lib/libc.so.6+0x2328f)
+ #15 0x7f0355e3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #16 0x563b7c7fe0e4 in _start ../sysdeps/x86_64/start.S:115
+
+ 0x602000000398 is located 0 bytes to the right of 8-byte region [0x602000000390,0x602000000398)
+ allocated by thread T0 here:
+ #0 0x7f0356072faa in __interceptor_strdup /usr/src/debug/gcc/libsanitizer/asan/asan_interceptors.cpp:439
+ #1 0x563b7cf7317c in xstrdup wrapper.c:39
+ #2 0x563b7cd9a06a in save_user_format pretty.c:40
+ #3 0x563b7cd9b3e5 in get_commit_format pretty.c:173
+ #4 0x563b7ce54ea0 in handle_revision_opt revision.c:2456
+ #5 0x563b7ce597c9 in setup_revisions revision.c:2850
+ #6 0x563b7c9269e0 in cmd_log_init_finish builtin/log.c:269
+ #7 0x563b7c927362 in cmd_log_init builtin/log.c:348
+ #8 0x563b7c92b193 in cmd_log builtin/log.c:882
+ #9 0x563b7c802993 in run_builtin git.c:466
+ #10 0x563b7c803397 in handle_builtin git.c:721
+ #11 0x563b7c803b07 in run_argv git.c:788
+ #12 0x563b7c8048a7 in cmd_main git.c:923
+ #13 0x563b7ca99682 in main common-main.c:57
+ #14 0x7f0355e3c28f (/usr/lib/libc.so.6+0x2328f)
+ #15 0x7f0355e3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #16 0x563b7c7fe0e4 in _start ../sysdeps/x86_64/start.S:115
+
+ SUMMARY: AddressSanitizer: heap-buffer-overflow /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:725 in __interceptor_strchrnul
+ Shadow bytes around the buggy address:
+ 0x0c047fff8020: fa fa fd fd fa fa 00 06 fa fa 05 fa fa fa fd fd
+ 0x0c047fff8030: fa fa 00 02 fa fa 06 fa fa fa 05 fa fa fa fd fd
+ 0x0c047fff8040: fa fa 00 07 fa fa 03 fa fa fa fd fd fa fa 00 00
+ 0x0c047fff8050: fa fa 00 01 fa fa fd fd fa fa 00 00 fa fa 00 01
+ 0x0c047fff8060: fa fa 00 06 fa fa 00 06 fa fa 05 fa fa fa 05 fa
+ =>0x0c047fff8070: fa fa 00[fa]fa fa fd fa fa fa fd fd fa fa fd fd
+ 0x0c047fff8080: fa fa fd fd fa fa 00 00 fa fa 00 fa fa fa fd fa
+ 0x0c047fff8090: fa fa fd fd fa fa 00 00 fa fa fa fa fa fa fa fa
+ 0x0c047fff80a0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c047fff80b0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c047fff80c0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ Shadow byte legend (one shadow byte represents 8 application bytes):
+ Addressable: 00
+ Partially addressable: 01 02 03 04 05 06 07
+ Heap left redzone: fa
+ Freed heap region: fd
+ Stack left redzone: f1
+ Stack mid redzone: f2
+ Stack right redzone: f3
+ Stack after return: f5
+ Stack use after scope: f8
+ Global redzone: f9
+ Global init order: f6
+ Poisoned by user: f7
+ Container overflow: fc
+ Array cookie: ac
+ Intra object redzone: bb
+ ASan internal: fe
+ Left alloca redzone: ca
+ Right alloca redzone: cb
+ ==10888==ABORTING
+
+Fix this bug by checking whether `end` points at the trailing NUL byte.
+Add a test which catches this out-of-bounds read and which demonstrates
+that we used to write out-of-bounds data into the formatted message.
+
+Reported-by: Markus Vervier <markus.vervier@x41-dsec.de>
+Original-patch-by: Markus Vervier <markus.vervier@x41-dsec.de>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/f6e0b9f38987ad5e47bab551f8760b70689a5905]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 2 +-
+ t/t4205-log-pretty-formats.sh | 6 ++++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/pretty.c b/pretty.c
+index 4348a82..c49e818 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1024,7 +1024,7 @@ static size_t parse_padding_placeholder(const char *placeholder,
+ const char *end = start + strcspn(start, ",)");
+ char *next;
+ int width;
+- if (!end || end == start)
++ if (!*end || end == start)
+ return 0;
+ width = strtol(start, &next, 10);
+ if (next == start || width == 0)
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index e69caba..8a349df 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -794,6 +794,12 @@ test_expect_success 'log --pretty with space stealing' '
+ test_cmp expect actual
+ '
+
++test_expect_success 'log --pretty with invalid padding format' '
++ printf "%s%%<(20" "$(git rev-parse HEAD)" >expect &&
++ git log -1 --pretty="format:%H%<(20" >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-05.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-05.patch
new file mode 100644
index 0000000000..994f7a55b1
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-05.patch
@@ -0,0 +1,98 @@
+From 1de69c0cdd388b0a5b7bdde0bfa0bda514a354b0 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:39 +0100
+Subject: [PATCH 05/12] pretty: fix adding linefeed when placeholder is not expanded
+
+When a formatting directive has a `+` or ` ` after the `%`, then we add
+either a line feed or space if the placeholder expands to a non-empty
+string. In specific cases though this logic doesn't work as expected,
+and we try to add the character even in the case where the formatting
+directive is empty.
+
+One such pattern is `%w(1)%+d%+w(2)`. `%+d` expands to reference names
+pointing to a certain commit, like in `git log --decorate`. For a tagged
+commit this would for example expand to `\n (tag: v1.0.0)`, which has a
+leading newline due to the `+` modifier and a space added by `%d`. Now
+the second wrapping directive will cause us to rewrap the text to
+`\n(tag:\nv1.0.0)`, which is one byte shorter due to the missing leading
+space. The code that handles the `+` magic now notices that the length
+has changed and will thus try to insert a leading line feed at the
+original position. But as the string was shortened, the original
+position is past the buffer's boundary and thus we die with an error.
+
+Now there are two issues here:
+
+ 1. We check whether the buffer length has changed, not whether it
+ has been extended. This causes us to try and add the character
+ past the string boundary.
+
+ 2. The current logic does not make any sense whatsoever. When the
+ string got expanded due to the rewrap, putting the separator into
+ the original position is likely to put it somewhere into the
+ middle of the rewrapped contents.
+
+It is debatable whether `%+w()` makes any sense in the first place.
+Strictly speaking, the placeholder never expands to a non-empty string,
+and consequentially we shouldn't ever accept this combination. We thus
+fix the bug by simply refusing `%+w()`.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/1de69c0cdd388b0a5b7bdde0bfa0bda514a354b0]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 14 +++++++++++++-
+ t/t4205-log-pretty-formats.sh | 8 ++++++++
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/pretty.c b/pretty.c
+index c49e818..195d005 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1551,9 +1551,21 @@ static size_t format_commit_item(struct strbuf *sb, /* in UTF-8 */
+ default:
+ break;
+ }
+- if (magic != NO_MAGIC)
++ if (magic != NO_MAGIC) {
+ placeholder++;
+
++ switch (placeholder[0]) {
++ case 'w':
++ /*
++ * `%+w()` cannot ever expand to a non-empty string,
++ * and it potentially changes the layout of preceding
++ * contents. We're thus not able to handle the magic in
++ * this combination and refuse the pattern.
++ */
++ return 0;
++ };
++ }
++
+ orig_len = sb->len;
+ if (((struct format_commit_context *)context)->flush_type != no_flush)
+ consumed = format_and_pad_commit(sb, placeholder, context);
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index 8a349df..fa1bc2b 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -800,6 +800,14 @@ test_expect_success 'log --pretty with invalid padding format' '
+ test_cmp expect actual
+ '
+
++test_expect_success 'log --pretty with magical wrapping directives' '
++ commit_id=$(git commit-tree HEAD^{tree} -m "describe me") &&
++ git tag describe-me $commit_id &&
++ printf "\n(tag:\ndescribe-me)%%+w(2)" >expect &&
++ git log -1 --pretty="format:%w(1)%+d%+w(2)" $commit_id >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-06.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-06.patch
new file mode 100644
index 0000000000..93fbe5c7fe
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-06.patch
@@ -0,0 +1,90 @@
+From 48050c42c73c28b0c001d63d11dffac7e116847b Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:49 +0100
+Subject: [PATCH 06/12] pretty: fix integer overflow in wrapping format
+
+The `%w(width,indent1,indent2)` formatting directive can be used to
+rewrap text to a specific width and is designed after git-shortlog(1)'s
+`-w` parameter. While the three parameters are all stored as `size_t`
+internally, `strbuf_add_wrapped_text()` accepts integers as input. As a
+result, the casted integers may overflow. As these now-negative integers
+are later on passed to `strbuf_addchars()`, we will ultimately run into
+implementation-defined behaviour due to casting a negative number back
+to `size_t` again. On my platform, this results in trying to allocate
+9000 petabyte of memory.
+
+Fix this overflow by using `cast_size_t_to_int()` so that we reject
+inputs that cannot be represented as an integer.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/48050c42c73c28b0c001d63d11dffac7e116847b]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ git-compat-util.h | 8 ++++++++
+ pretty.c | 4 +++-
+ t/t4205-log-pretty-formats.sh | 12 ++++++++++++
+ 3 files changed, 23 insertions(+), 1 deletion(-)
+
+diff --git a/git-compat-util.h b/git-compat-util.h
+index a1ecfd3..b0f3890 100644
+--- a/git-compat-util.h
++++ b/git-compat-util.h
+@@ -854,6 +854,14 @@ static inline size_t st_sub(size_t a, size_t b)
+ return a - b;
+ }
+
++static inline int cast_size_t_to_int(size_t a)
++{
++ if (a > INT_MAX)
++ die("number too large to represent as int on this platform: %"PRIuMAX,
++ (uintmax_t)a);
++ return (int)a;
++}
++
+ #ifdef HAVE_ALLOCA_H
+ # include <alloca.h>
+ # define xalloca(size) (alloca(size))
+diff --git a/pretty.c b/pretty.c
+index 195d005..ff9fc97 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -898,7 +898,9 @@ static void strbuf_wrap(struct strbuf *sb, size_t pos,
+ if (pos)
+ strbuf_add(&tmp, sb->buf, pos);
+ strbuf_add_wrapped_text(&tmp, sb->buf + pos,
+- (int) indent1, (int) indent2, (int) width);
++ cast_size_t_to_int(indent1),
++ cast_size_t_to_int(indent2),
++ cast_size_t_to_int(width));
+ strbuf_swap(&tmp, sb);
+ strbuf_release(&tmp);
+ }
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index fa1bc2b..23ac508 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -808,6 +808,18 @@ test_expect_success 'log --pretty with magical wrapping directives' '
+ test_cmp expect actual
+ '
+
++test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing wrapping directive' '
++ cat >expect <<-EOF &&
++ fatal: number too large to represent as int on this platform: 2147483649
++ EOF
++ test_must_fail git log -1 --pretty="format:%w(2147483649,1,1)%d" 2>error &&
++ test_cmp expect error &&
++ test_must_fail git log -1 --pretty="format:%w(1,2147483649,1)%d" 2>error &&
++ test_cmp expect error &&
++ test_must_fail git log -1 --pretty="format:%w(1,1,2147483649)%d" 2>error &&
++ test_cmp expect error
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-07.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-07.patch
new file mode 100644
index 0000000000..ec248ad6c2
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-07.patch
@@ -0,0 +1,123 @@
+From 522cc87fdc25449222a5894a428eebf4b8d5eaa9 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:46:53 +0100
+Subject: [PATCH 07/12] utf8: fix truncated string lengths in utf8_strnwidth()
+
+The `utf8_strnwidth()` function accepts an optional string length as
+input parameter. This parameter can either be set to `-1`, in which case
+we call `strlen()` on the input. Or it can be set to a positive integer
+that indicates a precomputed length, which callers typically compute by
+calling `strlen()` at some point themselves.
+
+The input parameter is an `int` though, whereas `strlen()` returns a
+`size_t`. This can lead to implementation-defined behaviour though when
+the `size_t` cannot be represented by the `int`. In the general case
+though this leads to wrap-around and thus to negative string sizes,
+which is sure enough to not lead to well-defined behaviour.
+
+Fix this by accepting a `size_t` instead of an `int` as string length.
+While this takes away the ability of callers to simply pass in `-1` as
+string length, it really is trivial enough to convert them to pass in
+`strlen()` instead.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/522cc87fdc25449222a5894a428eebf4b8d5eaa9]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ column.c | 2 +-
+ pretty.c | 4 ++--
+ utf8.c | 8 +++-----
+ utf8.h | 2 +-
+ 4 files changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/column.c b/column.c
+index 4a38eed..0c79850 100644
+--- a/column.c
++++ b/column.c
+@@ -23,7 +23,7 @@ struct column_data {
+ /* return length of 's' in letters, ANSI escapes stripped */
+ static int item_length(const char *s)
+ {
+- return utf8_strnwidth(s, -1, 1);
++ return utf8_strnwidth(s, strlen(s), 1);
+ }
+
+ /*
+diff --git a/pretty.c b/pretty.c
+index ff9fc97..c3c1443 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -1437,7 +1437,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ int occupied;
+ if (!start)
+ start = sb->buf;
+- occupied = utf8_strnwidth(start, -1, 1);
++ occupied = utf8_strnwidth(start, strlen(start), 1);
+ occupied += c->pretty_ctx->graph_width;
+ padding = (-padding) - occupied;
+ }
+@@ -1455,7 +1455,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */
+ placeholder++;
+ total_consumed++;
+ }
+- len = utf8_strnwidth(local_sb.buf, -1, 1);
++ len = utf8_strnwidth(local_sb.buf, local_sb.len, 1);
+
+ if (c->flush_type == flush_left_and_steal) {
+ const char *ch = sb->buf + sb->len - 1;
+diff --git a/utf8.c b/utf8.c
+index 5c8f151..a66984b 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -206,13 +206,11 @@ int utf8_width(const char **start, size_t *remainder_p)
+ * string, assuming that the string is utf8. Returns strlen() instead
+ * if the string does not look like a valid utf8 string.
+ */
+-int utf8_strnwidth(const char *string, int len, int skip_ansi)
++int utf8_strnwidth(const char *string, size_t len, int skip_ansi)
+ {
+ int width = 0;
+ const char *orig = string;
+
+- if (len == -1)
+- len = strlen(string);
+ while (string && string < orig + len) {
+ int skip;
+ while (skip_ansi &&
+@@ -225,7 +223,7 @@ int utf8_strnwidth(const char *string, int len, int skip_ansi)
+
+ int utf8_strwidth(const char *string)
+ {
+- return utf8_strnwidth(string, -1, 0);
++ return utf8_strnwidth(string, strlen(string), 0);
+ }
+
+ int is_utf8(const char *text)
+@@ -792,7 +790,7 @@ int skip_utf8_bom(char **text, size_t len)
+ void strbuf_utf8_align(struct strbuf *buf, align_type position, unsigned int width,
+ const char *s)
+ {
+- int slen = strlen(s);
++ size_t slen = strlen(s);
+ int display_len = utf8_strnwidth(s, slen, 0);
+ int utf8_compensation = slen - display_len;
+
+diff --git a/utf8.h b/utf8.h
+index fcd5167..6da1b6d 100644
+--- a/utf8.h
++++ b/utf8.h
+@@ -7,7 +7,7 @@ typedef unsigned int ucs_char_t; /* assuming 32bit int */
+
+ size_t display_mode_esc_sequence_len(const char *s);
+ int utf8_width(const char **start, size_t *remainder_p);
+-int utf8_strnwidth(const char *string, int len, int skip_ansi);
++int utf8_strnwidth(const char *string, size_t len, int skip_ansi);
+ int utf8_strwidth(const char *string);
+ int is_utf8(const char *text);
+ int is_encoding_utf8(const char *name);
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-08.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-08.patch
new file mode 100644
index 0000000000..3de6a5ba6a
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-08.patch
@@ -0,0 +1,67 @@
+From 17d23e8a3812a5ca3dd6564e74d5250f22e5d76d Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:00 +0100
+Subject: [PATCH 08/12] utf8: fix returning negative string width
+
+The `utf8_strnwidth()` function calls `utf8_width()` in a loop and adds
+its returned width to the end result. `utf8_width()` can return `-1`
+though in case it reads a control character, which means that the
+computed string width is going to be wrong. In the worst case where
+there are more control characters than non-control characters, we may
+even return a negative string width.
+
+Fix this bug by treating control characters as having zero width.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/17d23e8a3812a5ca3dd6564e74d5250f22e5d76d]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/t4205-log-pretty-formats.sh | 6 ++++++
+ utf8.c | 8 ++++++--
+ 2 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index 23ac508..261a6f0 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -820,6 +820,12 @@ test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing wrapping dire
+ test_cmp expect error
+ '
+
++test_expect_success 'log --pretty with padding and preceding control chars' '
++ printf "\20\20 0" >expect &&
++ git log -1 --pretty="format:%x10%x10%>|(4)%x30" >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+diff --git a/utf8.c b/utf8.c
+index a66984b..6632bd2 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -212,11 +212,15 @@ int utf8_strnwidth(const char *string, size_t len, int skip_ansi)
+ const char *orig = string;
+
+ while (string && string < orig + len) {
+- int skip;
++ int glyph_width, skip;
++
+ while (skip_ansi &&
+ (skip = display_mode_esc_sequence_len(string)) != 0)
+ string += skip;
+- width += utf8_width(&string, NULL);
++
++ glyph_width = utf8_width(&string, NULL);
++ if (glyph_width > 0)
++ width += glyph_width;
+ }
+ return string ? width : len;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-09.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-09.patch
new file mode 100644
index 0000000000..761d4c6a9f
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-09.patch
@@ -0,0 +1,162 @@
+From 937b71cc8b5b998963a7f9a33312ba3549d55510 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:04 +0100
+Subject: [PATCH 09/12] utf8: fix overflow when returning string width
+
+The return type of both `utf8_strwidth()` and `utf8_strnwidth()` is
+`int`, but we operate on string lengths which are typically of type
+`size_t`. This means that when the string is longer than `INT_MAX`, we
+will overflow and thus return a negative result.
+
+This can lead to an out-of-bounds write with `--pretty=format:%<(1)%B`
+and a commit message that is 2^31+1 bytes long:
+
+ =================================================================
+ ==26009==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x603000001168 at pc 0x7f95c4e5f427 bp 0x7ffd8541c900 sp 0x7ffd8541c0a8
+ WRITE of size 2147483649 at 0x603000001168 thread T0
+ #0 0x7f95c4e5f426 in __interceptor_memcpy /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:827
+ #1 0x5612bbb1068c in format_and_pad_commit pretty.c:1763
+ #2 0x5612bbb1087a in format_commit_item pretty.c:1801
+ #3 0x5612bbc33bab in strbuf_expand strbuf.c:429
+ #4 0x5612bbb110e7 in repo_format_commit_message pretty.c:1869
+ #5 0x5612bbb12d96 in pretty_print_commit pretty.c:2161
+ #6 0x5612bba0a4d5 in show_log log-tree.c:781
+ #7 0x5612bba0d6c7 in log_tree_commit log-tree.c:1117
+ #8 0x5612bb691ed5 in cmd_log_walk_no_free builtin/log.c:508
+ #9 0x5612bb69235b in cmd_log_walk builtin/log.c:549
+ #10 0x5612bb6951a2 in cmd_log builtin/log.c:883
+ #11 0x5612bb56c993 in run_builtin git.c:466
+ #12 0x5612bb56d397 in handle_builtin git.c:721
+ #13 0x5612bb56db07 in run_argv git.c:788
+ #14 0x5612bb56e8a7 in cmd_main git.c:923
+ #15 0x5612bb803682 in main common-main.c:57
+ #16 0x7f95c4c3c28f (/usr/lib/libc.so.6+0x2328f)
+ #17 0x7f95c4c3c349 in __libc_start_main (/usr/lib/libc.so.6+0x23349)
+ #18 0x5612bb5680e4 in _start ../sysdeps/x86_64/start.S:115
+
+ 0x603000001168 is located 0 bytes to the right of 24-byte region [0x603000001150,0x603000001168)
+ allocated by thread T0 here:
+ #0 0x7f95c4ebe7ea in __interceptor_realloc /usr/src/debug/gcc/libsanitizer/asan/asan_malloc_linux.cpp:85
+ #1 0x5612bbcdd556 in xrealloc wrapper.c:136
+ #2 0x5612bbc310a3 in strbuf_grow strbuf.c:99
+ #3 0x5612bbc32acd in strbuf_add strbuf.c:298
+ #4 0x5612bbc33aec in strbuf_expand strbuf.c:418
+ #5 0x5612bbb110e7 in repo_format_commit_message pretty.c:1869
+ #6 0x5612bbb12d96 in pretty_print_commit pretty.c:2161
+ #7 0x5612bba0a4d5 in show_log log-tree.c:781
+ #8 0x5612bba0d6c7 in log_tree_commit log-tree.c:1117
+ #9 0x5612bb691ed5 in cmd_log_walk_no_free builtin/log.c:508
+ #10 0x5612bb69235b in cmd_log_walk builtin/log.c:549
+ #11 0x5612bb6951a2 in cmd_log builtin/log.c:883
+ #12 0x5612bb56c993 in run_builtin git.c:466
+ #13 0x5612bb56d397 in handle_builtin git.c:721
+ #14 0x5612bb56db07 in run_argv git.c:788
+ #15 0x5612bb56e8a7 in cmd_main git.c:923
+ #16 0x5612bb803682 in main common-main.c:57
+ #17 0x7f95c4c3c28f (/usr/lib/libc.so.6+0x2328f)
+
+ SUMMARY: AddressSanitizer: heap-buffer-overflow /usr/src/debug/gcc/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc:827 in __interceptor_memcpy
+ Shadow bytes around the buggy address:
+ 0x0c067fff81d0: fd fd fd fa fa fa fd fd fd fa fa fa fd fd fd fa
+ 0x0c067fff81e0: fa fa fd fd fd fd fa fa fd fd fd fd fa fa fd fd
+ 0x0c067fff81f0: fd fa fa fa fd fd fd fa fa fa fd fd fd fa fa fa
+ 0x0c067fff8200: fd fd fd fa fa fa fd fd fd fd fa fa 00 00 00 fa
+ 0x0c067fff8210: fa fa fd fd fd fa fa fa fd fd fd fa fa fa fd fd
+ =>0x0c067fff8220: fd fa fa fa fd fd fd fa fa fa 00 00 00[fa]fa fa
+ 0x0c067fff8230: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8240: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8250: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8260: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ 0x0c067fff8270: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa
+ Shadow byte legend (one shadow byte represents 8 application bytes):
+ Addressable: 00
+ Partially addressable: 01 02 03 04 05 06 07
+ Heap left redzone: fa
+ Freed heap region: fd
+ Stack left redzone: f1
+ Stack mid redzone: f2
+ Stack right redzone: f3
+ Stack after return: f5
+ Stack use after scope: f8
+ Global redzone: f9
+ Global init order: f6
+ Poisoned by user: f7
+ Container overflow: fc
+ Array cookie: ac
+ Intra object redzone: bb
+ ASan internal: fe
+ Left alloca redzone: ca
+ Right alloca redzone: cb
+ ==26009==ABORTING
+
+Now the proper fix for this would be to convert both functions to return
+a `size_t` instead of an `int`. But given that this commit may be part
+of a security release, let's instead do the minimal viable fix and die
+in case we see an overflow.
+
+Add a test that would have previously caused us to crash.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/937b71cc8b5b998963a7f9a33312ba3549d55510]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/t4205-log-pretty-formats.sh | 8 ++++++++
+ utf8.c | 12 +++++++++---
+ 2 files changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index 261a6f0..de15007 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -843,4 +843,12 @@ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit mes
+ test_cmp expect actual
+ '
+
++test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message does not cause allocation failure' '
++ test_must_fail git log -1 --format="%<(1)%B" $huge_commit 2>error &&
++ cat >expect <<-EOF &&
++ fatal: number too large to represent as int on this platform: 2147483649
++ EOF
++ test_cmp expect error
++'
++
+ test_done
+diff --git a/utf8.c b/utf8.c
+index 6632bd2..03be475 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -208,11 +208,12 @@ int utf8_width(const char **start, size_t *remainder_p)
+ */
+ int utf8_strnwidth(const char *string, size_t len, int skip_ansi)
+ {
+- int width = 0;
+ const char *orig = string;
++ size_t width = 0;
+
+ while (string && string < orig + len) {
+- int glyph_width, skip;
++ int glyph_width;
++ size_t skip;
+
+ while (skip_ansi &&
+ (skip = display_mode_esc_sequence_len(string)) != 0)
+@@ -222,7 +223,12 @@ int utf8_strnwidth(const char *string, size_t len, int skip_ansi)
+ if (glyph_width > 0)
+ width += glyph_width;
+ }
+- return string ? width : len;
++
++ /*
++ * TODO: fix the interface of this function and `utf8_strwidth()` to
++ * return `size_t` instead of `int`.
++ */
++ return cast_size_t_to_int(string ? width : len);
+ }
+
+ int utf8_strwidth(const char *string)
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-10.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-10.patch
new file mode 100644
index 0000000000..bbfc6e758f
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-10.patch
@@ -0,0 +1,99 @@
+From 81c2d4c3a5ba0e6ab8c348708441fed170e63a82 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:10 +0100
+Subject: [PATCH 10/12] utf8: fix checking for glyph width in strbuf_utf8_replace()
+
+In `strbuf_utf8_replace()`, we call `utf8_width()` to compute the width
+of the current glyph. If the glyph is a control character though it can
+be that `utf8_width()` returns `-1`, but because we assign this value to
+a `size_t` the conversion will cause us to underflow. This bug can
+easily be triggered with the following command:
+
+ $ git log --pretty='format:xxx%<|(1,trunc)%x10'
+
+From all I can see though this seems to be a benign underflow that has
+no security-related consequences.
+
+Fix the bug by using an `int` instead. When we see a control character,
+we now copy it into the target buffer but don't advance the current
+width of the string.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/81c2d4c3a5ba0e6ab8c348708441fed170e63a82]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/t4205-log-pretty-formats.sh | 7 +++++++
+ utf8.c | 19 ++++++++++++++-----
+ 2 files changed, 21 insertions(+), 5 deletions(-)
+
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index de15007..52c8bc8 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -826,6 +826,13 @@ test_expect_success 'log --pretty with padding and preceding control chars' '
+ test_cmp expect actual
+ '
+
++test_expect_success 'log --pretty truncation with control chars' '
++ test_commit "$(printf "\20\20\20\20xxxx")" file contents commit-with-control-chars &&
++ printf "\20\20\20\20x.." >expect &&
++ git log -1 --pretty="format:%<(3,trunc)%s" commit-with-control-chars >actual &&
++ test_cmp expect actual
++'
++
+ test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' '
+ # We only assert that this command does not crash. This needs to be
+ # executed with the address sanitizer to demonstrate failure.
+diff --git a/utf8.c b/utf8.c
+index 03be475..ec03e69 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -377,6 +377,7 @@ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
+ dst = sb_dst.buf;
+
+ while (src < end) {
++ int glyph_width;
+ char *old;
+ size_t n;
+
+@@ -390,21 +391,29 @@ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
+ break;
+
+ old = src;
+- n = utf8_width((const char**)&src, NULL);
+- if (!src) /* broken utf-8, do nothing */
++ glyph_width = utf8_width((const char**)&src, NULL);
++ if (!src) /* broken utf-8, do nothing */
+ goto out;
+- if (n && w >= pos && w < pos + width) {
++
++ /*
++ * In case we see a control character we copy it into the
++ * buffer, but don't add it to the width.
++ */
++ if (glyph_width < 0)
++ glyph_width = 0;
++
++ if (glyph_width && w >= pos && w < pos + width) {
+ if (subst) {
+ memcpy(dst, subst, subst_len);
+ dst += subst_len;
+ subst = NULL;
+ }
+- w += n;
++ w += glyph_width;
+ continue;
+ }
+ memcpy(dst, old, src - old);
+ dst += src - old;
+- w += n;
++ w += glyph_width;
+ }
+ strbuf_setlen(&sb_dst, dst - sb_dst.buf);
+ strbuf_swap(sb_src, &sb_dst);
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-11.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-11.patch
new file mode 100644
index 0000000000..f339edfc8a
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-11.patch
@@ -0,0 +1,90 @@
+From f930a2394303b902e2973f4308f96529f736b8bc Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:15 +0100
+Subject: [PATCH 11/12] utf8: refactor strbuf_utf8_replace to not rely on preallocated buffer
+
+In `strbuf_utf8_replace`, we preallocate the destination buffer and then
+use `memcpy` to copy bytes into it at computed offsets. This feels
+rather fragile and is hard to understand at times. Refactor the code to
+instead use `strbuf_add` and `strbuf_addstr` so that we can be sure that
+there is no possibility to perform an out-of-bounds write.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/f930a2394303b902e2973f4308f96529f736b8bc]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ utf8.c | 34 +++++++++++++---------------------
+ 1 file changed, 13 insertions(+), 21 deletions(-)
+
+diff --git a/utf8.c b/utf8.c
+index ec03e69..a13f5e3 100644
+--- a/utf8.c
++++ b/utf8.c
+@@ -365,26 +365,20 @@ void strbuf_add_wrapped_bytes(struct strbuf *buf, const char *data, int len,
+ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
+ const char *subst)
+ {
+- struct strbuf sb_dst = STRBUF_INIT;
+- char *src = sb_src->buf;
+- char *end = src + sb_src->len;
+- char *dst;
+- int w = 0, subst_len = 0;
++ const char *src = sb_src->buf, *end = sb_src->buf + sb_src->len;
++ struct strbuf dst;
++ int w = 0;
+
+- if (subst)
+- subst_len = strlen(subst);
+- strbuf_grow(&sb_dst, sb_src->len + subst_len);
+- dst = sb_dst.buf;
++ strbuf_init(&dst, sb_src->len);
+
+ while (src < end) {
++ const char *old;
+ int glyph_width;
+- char *old;
+ size_t n;
+
+ while ((n = display_mode_esc_sequence_len(src))) {
+- memcpy(dst, src, n);
++ strbuf_add(&dst, src, n);
+ src += n;
+- dst += n;
+ }
+
+ if (src >= end)
+@@ -404,21 +398,19 @@ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
+
+ if (glyph_width && w >= pos && w < pos + width) {
+ if (subst) {
+- memcpy(dst, subst, subst_len);
+- dst += subst_len;
++ strbuf_addstr(&dst, subst);
+ subst = NULL;
+ }
+- w += glyph_width;
+- continue;
++ } else {
++ strbuf_add(&dst, old, src - old);
+ }
+- memcpy(dst, old, src - old);
+- dst += src - old;
++
+ w += glyph_width;
+ }
+- strbuf_setlen(&sb_dst, dst - sb_dst.buf);
+- strbuf_swap(sb_src, &sb_dst);
++
++ strbuf_swap(sb_src, &dst);
+ out:
+- strbuf_release(&sb_dst);
++ strbuf_release(&dst);
+ }
+
+ /*
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2022-41903-12.patch b/meta/recipes-devtools/git/files/CVE-2022-41903-12.patch
new file mode 100644
index 0000000000..978865978d
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2022-41903-12.patch
@@ -0,0 +1,124 @@
+From 304a50adff6480ede46b68f7545baab542cbfb46 Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 1 Dec 2022 15:47:23 +0100
+Subject: [PATCH 12/12] pretty: restrict input lengths for padding and wrapping formats
+
+Both the padding and wrapping formatting directives allow the caller to
+specify an integer that ultimately leads to us adding this many chars to
+the result buffer. As a consequence, it is trivial to e.g. allocate 2GB
+of RAM via a single formatting directive and cause resource exhaustion
+on the machine executing this logic. Furthermore, it is debatable
+whether there are any sane usecases that require the user to pad data to
+2GB boundaries or to indent wrapped data by 2GB.
+
+Restrict the input sizes to 16 kilobytes at a maximum to limit the
+amount of bytes that can be requested by the user. This is not meant
+as a fix because there are ways to trivially amplify the amount of
+data we generate via formatting directives; the real protection is
+achieved by the changes in previous steps to catch and avoid integer
+wraparound that causes us to under-allocate and access beyond the
+end of allocated memory regions. But having such a limit
+significantly helps fuzzing the pretty format, because the fuzzer is
+otherwise quite fast to run out-of-memory as it discovers these
+formatters.
+
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/304a50adff6480ede46b68f7545baab542cbfb46]
+CVE: CVE-2022-41903
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ pretty.c | 26 ++++++++++++++++++++++++++
+ t/t4205-log-pretty-formats.sh | 24 +++++++++++++++---------
+ 2 files changed, 41 insertions(+), 9 deletions(-)
+
+diff --git a/pretty.c b/pretty.c
+index c3c1443..e9687f0 100644
+--- a/pretty.c
++++ b/pretty.c
+@@ -13,6 +13,13 @@
+ #include "gpg-interface.h"
+ #include "trailer.h"
+
++/*
++ * The limit for formatting directives, which enable the caller to append
++ * arbitrarily many bytes to the formatted buffer. This includes padding
++ * and wrapping formatters.
++ */
++#define FORMATTING_LIMIT (16 * 1024)
++
+ static char *user_format;
+ static struct cmt_fmt_map {
+ const char *name;
+@@ -1029,6 +1036,15 @@ static size_t parse_padding_placeholder(const char *placeholder,
+ if (!*end || end == start)
+ return 0;
+ width = strtol(start, &next, 10);
++
++ /*
++ * We need to limit the amount of padding, or otherwise this
++ * would allow the user to pad the buffer by arbitrarily many
++ * bytes and thus cause resource exhaustion.
++ */
++ if (width < -FORMATTING_LIMIT || width > FORMATTING_LIMIT)
++ return 0;
++
+ if (next == start || width == 0)
+ return 0;
+ if (width < 0) {
+@@ -1188,6 +1204,16 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
+ if (*next != ')')
+ return 0;
+ }
++
++ /*
++ * We need to limit the format here as it allows the
++ * user to prepend arbitrarily many bytes to the buffer
++ * when rewrapping.
++ */
++ if (width > FORMATTING_LIMIT ||
++ indent1 > FORMATTING_LIMIT ||
++ indent2 > FORMATTING_LIMIT)
++ return 0;
+ rewrap_message_tail(sb, c, width, indent1, indent2);
+ return end - placeholder + 1;
+ } else
+diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh
+index 52c8bc8..572d02f 100755
+--- a/t/t4205-log-pretty-formats.sh
++++ b/t/t4205-log-pretty-formats.sh
+@@ -809,15 +809,21 @@ test_expect_success 'log --pretty with magical wrapping directives' '
+ '
+
+ test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing wrapping directive' '
+- cat >expect <<-EOF &&
+- fatal: number too large to represent as int on this platform: 2147483649
+- EOF
+- test_must_fail git log -1 --pretty="format:%w(2147483649,1,1)%d" 2>error &&
+- test_cmp expect error &&
+- test_must_fail git log -1 --pretty="format:%w(1,2147483649,1)%d" 2>error &&
+- test_cmp expect error &&
+- test_must_fail git log -1 --pretty="format:%w(1,1,2147483649)%d" 2>error &&
+- test_cmp expect error
++ printf "%%w(2147483649,1,1)0" >expect &&
++ git log -1 --pretty="format:%w(2147483649,1,1)%x30" >actual &&
++ test_cmp expect actual &&
++ printf "%%w(1,2147483649,1)0" >expect &&
++ git log -1 --pretty="format:%w(1,2147483649,1)%x30" >actual &&
++ test_cmp expect actual &&
++ printf "%%w(1,1,2147483649)0" >expect &&
++ git log -1 --pretty="format:%w(1,1,2147483649)%x30" >actual &&
++ test_cmp expect actual
++'
++
++test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing padding directive' '
++ printf "%%<(2147483649)0" >expect &&
++ git log -1 --pretty="format:%<(2147483649)%x30" >actual &&
++ test_cmp expect actual
+ '
+
+ test_expect_success 'log --pretty with padding and preceding control chars' '
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2023-22490-1.patch b/meta/recipes-devtools/git/files/CVE-2023-22490-1.patch
new file mode 100644
index 0000000000..cc9b448c5c
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-22490-1.patch
@@ -0,0 +1,179 @@
+From 58325b93c5b6212697b088371809e9948fee8052 Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Tue, 24 Jan 2023 19:43:45 -0500
+Subject: [PATCH 1/3] t5619: demonstrate clone_local() with ambiguous transport
+
+When cloning a repository, Git must determine (a) what transport
+mechanism to use, and (b) whether or not the clone is local.
+
+Since f38aa83 (use local cloning if insteadOf makes a local URL,
+2014-07-17), the latter check happens after the remote has been
+initialized, and references the remote's URL instead of the local path.
+This is done to make it possible for a `url.<base>.insteadOf` rule to
+convert a remote URL into a local one, in which case the `clone_local()`
+mechanism should be used.
+
+However, with a specially crafted repository, Git can be tricked into
+using a non-local transport while still setting `is_local` to "1" and
+using the `clone_local()` optimization. The below test case
+demonstrates such an instance, and shows that it can be used to include
+arbitrary (known) paths in the working copy of a cloned repository on a
+victim's machine[^1], even if local file clones are forbidden by
+`protocol.file.allow`.
+
+This happens in a few parts:
+
+ 1. We first call `get_repo_path()` to see if the remote is a local
+ path. If it is, we replace the repo name with its absolute path.
+
+ 2. We then call `transport_get()` on the repo name and decide how to
+ access it. If it was turned into an absolute path in the previous
+ step, then we should always treat it like a file.
+
+ 3. We use `get_repo_path()` again, and set `is_local` as appropriate.
+ But it's already too late to rewrite the repo name as an absolute
+ path, since we've already fed it to the transport code.
+
+The attack works by including a submodule whose URL corresponds to a
+path on disk. In the below example, the repository "sub" is reachable
+via the dumb HTTP protocol at (something like):
+
+ http://127.0.0.1:NNNN/dumb/sub.git
+
+However, the path "http:/127.0.0.1:NNNN/dumb" (that is, a top-level
+directory called "http:", then nested directories "127.0.0.1:NNNN", and
+"dumb") exists within the repository, too.
+
+To determine this, it first picks the appropriate transport, which is
+dumb HTTP. It then uses the remote's URL in order to determine whether
+the repository exists locally on disk. However, the malicious repository
+also contains an embedded stub repository which is the target of a
+symbolic link at the local path corresponding to the "sub" repository on
+disk (i.e., there is a symbolic link at "http:/127.0.0.1/dumb/sub.git",
+pointing to the stub repository via ".git/modules/sub/../../../repo").
+
+This stub repository fools Git into thinking that a local repository
+exists at that URL and thus can be cloned locally. The affected call is
+in `get_repo_path()`, which in turn calls `get_repo_path_1()`, which
+locates a valid repository at that target.
+
+This then causes Git to set the `is_local` variable to "1", and in turn
+instructs Git to clone the repository using its local clone optimization
+via the `clone_local()` function.
+
+The exploit comes into play because the stub repository's top-level
+"$GIT_DIR/objects" directory is a symbolic link which can point to an
+arbitrary path on the victim's machine. `clone_local()` resolves the
+top-level "objects" directory through a `stat(2)` call, meaning that we
+read through the symbolic link and copy or hardlink the directory
+contents at the destination of the link.
+
+In other words, we can get steps (1) and (3) to disagree by leveraging
+the dangling symlink to pick a non-local transport in the first step,
+and then set is_local to "1" in the third step when cloning with
+`--separate-git-dir`, which makes the symlink non-dangling.
+
+This can result in data-exfiltration on the victim's machine when
+sensitive data is at a known path (e.g., "/home/$USER/.ssh").
+
+The appropriate fix is two-fold:
+
+ - Resolve the transport later on (to avoid using the local
+ clone optimization with a non-local transport).
+
+ - Avoid reading through the top-level "objects" directory when
+ (correctly) using the clone_local() optimization.
+
+This patch merely demonstrates the issue. The following two patches will
+implement each part of the above fix, respectively.
+
+[^1]: Provided that any target directory does not contain symbolic
+ links, in which case the changes from 6f054f9 (builtin/clone.c:
+ disallow `--local` clones with symlinks, 2022-07-28) will abort the
+ clone.
+
+Reported-by: yvvdwf <yvvdwf@gmail.com>
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+[https://github.com/git/git/commit/58325b93c5b6212697b088371809e9948fee8052]
+CVE: CVE-2023-22490
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ t/t5619-clone-local-ambiguous-transport.sh | 63 ++++++++++++++++++++++
+ 1 file changed, 63 insertions(+)
+ create mode 100644 t/t5619-clone-local-ambiguous-transport.sh
+
+diff --git a/t/t5619-clone-local-ambiguous-transport.sh b/t/t5619-clone-local-ambiguous-transport.sh
+new file mode 100644
+index 0000000..7ebd31a
+--- /dev/null
++++ b/t/t5619-clone-local-ambiguous-transport.sh
+@@ -0,0 +1,63 @@
++#!/bin/sh
++
++test_description='test local clone with ambiguous transport'
++
++. ./test-lib.sh
++. "$TEST_DIRECTORY/lib-httpd.sh"
++
++if ! test_have_prereq SYMLINKS
++then
++ skip_all='skipping test, symlink support unavailable'
++ test_done
++fi
++
++start_httpd
++
++REPO="$HTTPD_DOCUMENT_ROOT_PATH/sub.git"
++URI="$HTTPD_URL/dumb/sub.git"
++
++test_expect_success 'setup' '
++ mkdir -p sensitive &&
++ echo "secret" >sensitive/secret &&
++
++ git init --bare "$REPO" &&
++ test_commit_bulk -C "$REPO" --ref=main 1 &&
++
++ git -C "$REPO" update-ref HEAD main &&
++ git -C "$REPO" update-server-info &&
++
++ git init malicious &&
++ (
++ cd malicious &&
++
++ git submodule add "$URI" &&
++
++ mkdir -p repo/refs &&
++ touch repo/refs/.gitkeep &&
++ printf "ref: refs/heads/a" >repo/HEAD &&
++ ln -s "$(cd .. && pwd)/sensitive" repo/objects &&
++
++ mkdir -p "$HTTPD_URL/dumb" &&
++ ln -s "../../../.git/modules/sub/../../../repo/" "$URI" &&
++
++ git add . &&
++ git commit -m "initial commit"
++ ) &&
++
++ # Delete all of the references in our malicious submodule to
++ # avoid the client attempting to checkout any objects (which
++ # will be missing, and thus will cause the clone to fail before
++ # we can trigger the exploit).
++ git -C "$REPO" for-each-ref --format="delete %(refname)" >in &&
++ git -C "$REPO" update-ref --stdin <in &&
++ git -C "$REPO" update-server-info
++'
++
++test_expect_failure 'ambiguous transport does not lead to arbitrary file-inclusion' '
++ git clone malicious clone &&
++ git -C clone submodule update --init &&
++
++ test_path_is_missing clone/.git/modules/sub/objects/secret
++'
++
++test_done
+--
+2.25.1
+
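The core of the exploit described in this first patch is that the same path is checked twice: once while the symlink is still dangling, and once after `--separate-git-dir` has made it resolve. The following is a minimal standalone Go sketch of that disagreement; it is illustrative only and not git's code, and the temporary paths are arbitrary.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	tmp, err := os.MkdirTemp("", "dangling-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmp)

	target := filepath.Join(tmp, "target")
	link := filepath.Join(tmp, "link")

	// A symlink whose target does not exist yet -- a dangling link.
	if err := os.Symlink(target, link); err != nil {
		panic(err)
	}

	// Check 1: Stat follows the link and fails, so the path looks absent
	// (compare get_repo_path() deciding the clone is not local).
	if _, err := os.Stat(link); err != nil {
		fmt.Println("while dangling:", err)
	}

	// Later, something creates the target (compare --separate-git-dir
	// creating .git/modules/... in the message above).
	if err := os.Mkdir(target, 0o755); err != nil {
		panic(err)
	}

	// Check 2: the very same path now resolves, so the two checks disagree.
	if info, err := os.Stat(link); err == nil {
		fmt.Println("after target exists: is dir =", info.IsDir())
	}
}
```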
diff --git a/meta/recipes-devtools/git/files/CVE-2023-22490-2.patch b/meta/recipes-devtools/git/files/CVE-2023-22490-2.patch
new file mode 100644
index 0000000000..0b5b40f827
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-22490-2.patch
@@ -0,0 +1,122 @@
+From cf8f6ce02a13f4d1979a53241afbee15a293fce9 Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Tue, 24 Jan 2023 19:43:48 -0500
+Subject: [PATCH 2/3] clone: delay picking a transport until after get_repo_path()
+
+In the previous commit, t5619 demonstrates an issue where two calls to
+`get_repo_path()` could trick Git into using its local clone mechanism
+in conjunction with a non-local transport.
+
+That sequence is:
+
+ - the starting state is that the local path https:/example.com/foo is a
+ symlink that points to ../../../.git/modules/foo. So it's dangling.
+
+ - get_repo_path() sees that no such path exists (because it's
+ dangling), and thus we do not canonicalize it into an absolute path
+
+ - because we're using --separate-git-dir, we create .git/modules/foo.
+ Now our symlink is no longer dangling!
+
+ - we pass the url to transport_get(), which sees it as an https URL.
+
+ - we call get_repo_path() again, on the url. This second call was
+ introduced by f38aa83 (use local cloning if insteadOf makes a
+ local URL, 2014-07-17). The idea is that we want to pull the url
+ fresh from the remote.c API, because it will apply any aliases.
+
+And of course now it sees that there is a local file, which is a
+mismatch with the transport we already selected.
+
+The issue in the above sequence is calling `transport_get()` before
+deciding whether or not the repository is indeed local, and not passing
+in an absolute path if it is local.
+
+This is reminiscent of a similar bug report in [1], where it was
+suggested to perform the `insteadOf` lookup earlier. Taking that
+approach may not be as straightforward: the intent is to store the
+original URL in the config while actually fetching from the insteadOf
+one, so conflating the two early on is a non-starter.
+
+Note: we pass the path returned by `get_repo_path(remote->url[0])`,
+which should be the same as `repo_name` (aside from any `insteadOf`
+rewrites).
+
+We *could* pass `absolute_pathdup()` of the same argument, which
+86521ac (Bring local clone's origin URL in line with that of a remote
+clone, 2008-09-01) indicates may differ depending on the presence of
+".git/" for a non-bare repo. That matters for forming relative submodule
+paths, but doesn't matter for the second call, since we're just feeding
+it to the transport code, which is fine either way.
+
+[1]: https://lore.kernel.org/git/CAMoD=Bi41mB3QRn3JdZL-FGHs4w3C2jGpnJB-CqSndO7FMtfzA@mail.gmail.com/
+
+Signed-off-by: Jeff King <peff@peff.net>
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+[https://github.com/git/git/commit/cf8f6ce02a13f4d1979a53241afbee15a293fce9]
+CVE: CVE-2023-22490
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ builtin/clone.c | 8 ++++----
+ t/t5619-clone-local-ambiguous-transport.sh | 15 +++++++++++----
+ 2 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/builtin/clone.c b/builtin/clone.c
+index 53e04b1..b57e703 100644
+--- a/builtin/clone.c
++++ b/builtin/clone.c
+@@ -1112,10 +1112,6 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
+ branch_top.buf);
+ refspec_append(&remote->fetch, default_refspec.buf);
+
+- transport = transport_get(remote, remote->url[0]);
+- transport_set_verbosity(transport, option_verbosity, option_progress);
+- transport->family = family;
+-
+ path = get_repo_path(remote->url[0], &is_bundle);
+ is_local = option_local != 0 && path && !is_bundle;
+ if (is_local) {
+@@ -1135,6 +1131,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
+ }
+ if (option_local > 0 && !is_local)
+ warning(_("--local is ignored"));
++
++ transport = transport_get(remote, path ? path : remote->url[0]);
++ transport_set_verbosity(transport, option_verbosity, option_progress);
++ transport->family = family;
+ transport->cloning = 1;
+
+ transport_set_option(transport, TRANS_OPT_KEEP, "yes");
+diff --git a/t/t5619-clone-local-ambiguous-transport.sh b/t/t5619-clone-local-ambiguous-transport.sh
+index 7ebd31a..cce62bf 100644
+--- a/t/t5619-clone-local-ambiguous-transport.sh
++++ b/t/t5619-clone-local-ambiguous-transport.sh
+@@ -53,11 +53,18 @@ test_expect_success 'setup' '
+ git -C "$REPO" update-server-info
+ '
+
+-test_expect_failure 'ambiguous transport does not lead to arbitrary file-inclusion' '
++test_expect_success 'ambiguous transport does not lead to arbitrary file-inclusion' '
+ git clone malicious clone &&
+- git -C clone submodule update --init &&
+-
+- test_path_is_missing clone/.git/modules/sub/objects/secret
++ test_must_fail git -C clone submodule update --init 2>err &&
++
++ test_path_is_missing clone/.git/modules/sub/objects/secret &&
++ # We would actually expect "transport .file. not allowed" here,
++ # but due to quirks of the URL detection in Git, we mis-parse
++ # the absolute path as a bogus URL and die before that step.
++ #
++ # This works for now, and if we ever fix the URL detection, it
++ # is OK to change this to detect the transport error.
++ grep "protocol .* is not supported" err
+ '
+
+ test_done
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/git/files/CVE-2023-22490-3.patch b/meta/recipes-devtools/git/files/CVE-2023-22490-3.patch
new file mode 100644
index 0000000000..08fb7f840b
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-22490-3.patch
@@ -0,0 +1,154 @@
+From bffc762f87ae8d18c6001bf0044a76004245754c Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Tue, 24 Jan 2023 19:43:51 -0500
+Subject: [PATCH 3/3] dir-iterator: prevent top-level symlinks without FOLLOW_SYMLINKS
+
+When using the dir_iterator API, we first stat(2) the base path, and
+then use that as a starting point to enumerate the directory's contents.
+
+If the directory contains symbolic links, we will immediately die() upon
+encountering them unless the `FOLLOW_SYMLINKS` flag is set. The same is
+not true when resolving the top-level directory, though.
+
+As explained in a previous commit, this oversight in 6f054f9
+(builtin/clone.c: disallow `--local` clones with symlinks, 2022-07-28)
+can be used as an attack vector to include arbitrary files on a victim's
+filesystem from outside of the repository.
+
+Prevent resolving top-level symlinks unless the FOLLOW_SYMLINKS flag is
+given, which will cause clones of a repository with a symlink'd
+"$GIT_DIR/objects" directory to fail.
+
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+[https://github.com/git/git/commit/bffc762f87ae8d18c6001bf0044a76004245754c]
+CVE: CVE-2023-22490
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dir-iterator.c | 13 +++++++++----
+ dir-iterator.h | 5 +++++
+ t/t0066-dir-iterator.sh | 27 ++++++++++++++++++++++++++-
+ t/t5604-clone-reference.sh | 16 ++++++++++++++++
+ 4 files changed, 56 insertions(+), 5 deletions(-)
+
+diff --git a/dir-iterator.c b/dir-iterator.c
+index b17e9f9..3764dd8 100644
+--- a/dir-iterator.c
++++ b/dir-iterator.c
+@@ -203,7 +203,7 @@ struct dir_iterator *dir_iterator_begin(const char *path, unsigned int flags)
+ {
+ struct dir_iterator_int *iter = xcalloc(1, sizeof(*iter));
+ struct dir_iterator *dir_iterator = &iter->base;
+- int saved_errno;
++ int saved_errno, err;
+
+ strbuf_init(&iter->base.path, PATH_MAX);
+ strbuf_addstr(&iter->base.path, path);
+@@ -213,10 +213,15 @@ struct dir_iterator *dir_iterator_begin(const char *path, unsigned int flags)
+ iter->flags = flags;
+
+ /*
+- * Note: stat already checks for NULL or empty strings and
+- * inexistent paths.
++ * Note: stat/lstat already checks for NULL or empty strings and
++ * nonexistent paths.
+ */
+- if (stat(iter->base.path.buf, &iter->base.st) < 0) {
++ if (iter->flags & DIR_ITERATOR_FOLLOW_SYMLINKS)
++ err = stat(iter->base.path.buf, &iter->base.st);
++ else
++ err = lstat(iter->base.path.buf, &iter->base.st);
++
++ if (err < 0) {
+ saved_errno = errno;
+ goto error_out;
+ }
+diff --git a/dir-iterator.h b/dir-iterator.h
+index 0822915..e3b6ff2 100644
+--- a/dir-iterator.h
++++ b/dir-iterator.h
+@@ -61,6 +61,11 @@
+ * not the symlinks themselves, which is the default behavior. Broken
+ * symlinks are ignored.
+ *
++ * Note: setting DIR_ITERATOR_FOLLOW_SYMLINKS affects resolving the
++ * starting path as well (e.g., attempting to iterate starting at a
++ * symbolic link pointing to a directory without FOLLOW_SYMLINKS will
++ * result in an error).
++ *
+ * Warning: circular symlinks are also followed when
+ * DIR_ITERATOR_FOLLOW_SYMLINKS is set. The iteration may end up with
+ * an ELOOP if they happen and DIR_ITERATOR_PEDANTIC is set.
+diff --git a/t/t0066-dir-iterator.sh b/t/t0066-dir-iterator.sh
+index 92910e4..c826f60 100755
+--- a/t/t0066-dir-iterator.sh
++++ b/t/t0066-dir-iterator.sh
+@@ -109,7 +109,9 @@ test_expect_success SYMLINKS 'setup dirs with symlinks' '
+ mkdir -p dir5/a/c &&
+ ln -s ../c dir5/a/b/d &&
+ ln -s ../ dir5/a/b/e &&
+- ln -s ../../ dir5/a/b/f
++ ln -s ../../ dir5/a/b/f &&
++
++ ln -s dir4 dir6
+ '
+
+ test_expect_success SYMLINKS 'dir-iterator should not follow symlinks by default' '
+@@ -145,4 +147,27 @@ test_expect_success SYMLINKS 'dir-iterator should follow symlinks w/ follow flag
+ test_cmp expected-follow-sorted-output actual-follow-sorted-output
+ '
+
++test_expect_success SYMLINKS 'dir-iterator does not resolve top-level symlinks' '
++ test_must_fail test-tool dir-iterator ./dir6 >out &&
++
++ grep "ENOTDIR" out
++'
++
++test_expect_success SYMLINKS 'dir-iterator resolves top-level symlinks w/ follow flag' '
++ cat >expected-follow-sorted-output <<-EOF &&
++ [d] (a) [a] ./dir6/a
++ [d] (a/f) [f] ./dir6/a/f
++ [d] (a/f/c) [c] ./dir6/a/f/c
++ [d] (b) [b] ./dir6/b
++ [d] (b/c) [c] ./dir6/b/c
++ [f] (a/d) [d] ./dir6/a/d
++ [f] (a/e) [e] ./dir6/a/e
++ EOF
++
++ test-tool dir-iterator --follow-symlinks ./dir6 >out &&
++ sort out >actual-follow-sorted-output &&
++
++ test_cmp expected-follow-sorted-output actual-follow-sorted-output
++'
++
+ test_done
+diff --git a/t/t5604-clone-reference.sh b/t/t5604-clone-reference.sh
+index 4894237..615b981 100755
+--- a/t/t5604-clone-reference.sh
++++ b/t/t5604-clone-reference.sh
+@@ -354,4 +354,20 @@ test_expect_success SYMLINKS 'clone repo with symlinked or unknown files at obje
+ test_must_be_empty T--shared.objects-symlinks.raw
+ '
+
++test_expect_success SYMLINKS 'clone repo with symlinked objects directory' '
++ test_when_finished "rm -fr sensitive malicious" &&
++
++ mkdir -p sensitive &&
++ echo "secret" >sensitive/file &&
++
++ git init malicious &&
++ rm -fr malicious/.git/objects &&
++ ln -s "$(pwd)/sensitive" ./malicious/.git/objects &&
++
++ test_must_fail git clone --local malicious clone 2>err &&
++
++ test_path_is_missing clone &&
++ grep "failed to start iterator over" err
++'
++
+ test_done
+--
+2.25.1
+
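The dir-iterator change above boils down to choosing `lstat(2)` over `stat(2)` for the starting path unless the caller explicitly asks to follow symlinks. Below is a rough standalone Go equivalent of that decision; the `followSymlinks` flag and `beginIteration` name are illustrative, not git's API.

```go
package main

import (
	"fmt"
	"os"
)

// beginIteration refuses to start at a symlinked directory unless the
// caller explicitly opts into following symlinks, mirroring the patched
// dir_iterator_begin() behavior.
func beginIteration(path string, followSymlinks bool) error {
	var info os.FileInfo
	var err error
	if followSymlinks {
		info, err = os.Stat(path) // resolves a symlinked starting point
	} else {
		info, err = os.Lstat(path) // sees the symlink itself
	}
	if err != nil {
		return err
	}
	if !info.IsDir() {
		// With Lstat, a top-level symlink is reported as a symlink, not a
		// directory, so iteration is refused.
		return fmt.Errorf("%s is not a directory (mode %v)", path, info.Mode())
	}
	return nil
}

func main() {
	if err := os.Mkdir("realdir", 0o755); err != nil && !os.IsExist(err) {
		panic(err)
	}
	_ = os.Symlink("realdir", "linkdir")
	defer os.Remove("linkdir")
	defer os.Remove("realdir")

	fmt.Println("no follow:", beginIteration("linkdir", false)) // refused
	fmt.Println("follow:   ", beginIteration("linkdir", true))  // allowed
}
```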
diff --git a/meta/recipes-devtools/git/files/CVE-2023-23946.patch b/meta/recipes-devtools/git/files/CVE-2023-23946.patch
new file mode 100644
index 0000000000..3629ff57b2
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-23946.patch
@@ -0,0 +1,184 @@
+From fade728df1221598f42d391cf377e9e84a32053f Mon Sep 17 00:00:00 2001
+From: Patrick Steinhardt <ps@pks.im>
+Date: Thu, 2 Feb 2023 11:54:34 +0100
+Subject: [PATCH] apply: fix writing behind newly created symbolic links
+
+When writing files git-apply(1) initially makes sure that none of the
+files it is about to create are behind a symlink:
+
+```
+ $ git init repo
+ Initialized empty Git repository in /tmp/repo/.git/
+ $ cd repo/
+ $ ln -s dir symlink
+ $ git apply - <<EOF
+ diff --git a/symlink/file b/symlink/file
+ new file mode 100644
+ index 0000000..e69de29
+ EOF
+ error: affected file 'symlink/file' is beyond a symbolic link
+```
+
+This safety mechanism is crucial to ensure that we don't write outside
+of the repository's working directory. It can be fooled though when the
+patch that is being applied creates the symbolic link in the first
+place, which can lead to writing files in arbitrary locations.
+
+Fix this by checking whether the path we're about to create is
+beyond a symlink or not. Tightening these checks like this should be
+fine as we already have these precautions in Git as explained
+above. Ideally, we should update the check we do up-front before
+starting to reflect the computed changes to the working tree so that
+we catch this case as well, but as part of embargoed security work,
+adding an equivalent check just before we try to write out a file
+should serve us well as a reasonable first step.
+
+Digging back into history shows that this vulnerability has existed
+since at least Git v2.9.0. As Git v2.8.0 and older no longer build on my
+system, I cannot tell whether older versions are affected as well.
+
+Reported-by: Joern Schneeweisz <jschneeweisz@gitlab.com>
+Signed-off-by: Patrick Steinhardt <ps@pks.im>
+Signed-off-by: Junio C Hamano <gitster@pobox.com>
+
+Upstream-Status: Backport
+[https://github.com/git/git/commit/fade728df1221598f42d391cf377e9e84a32053f]
+CVE: CVE-2023-23946
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ apply.c | 27 ++++++++++++++
+ t/t4115-apply-symlink.sh | 81 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 108 insertions(+)
+
+diff --git a/apply.c b/apply.c
+index f8a046a..4f303bf 100644
+--- a/apply.c
++++ b/apply.c
+@@ -4373,6 +4373,33 @@ static int create_one_file(struct apply_state *state,
+ if (state->cached)
+ return 0;
+
++ /*
++ * We already try to detect whether files are beyond a symlink in our
++ * up-front checks. But in the case where symlinks are created by any
++ * of the intermediate hunks it can happen that our up-front checks
++ * didn't yet see the symlink, but at the point of arriving here there
++ * in fact is one. We thus repeat the check for symlinks here.
++ *
++ * Note that this does not make the up-front check obsolete as the
++ * failure mode is different:
++ *
++ * - The up-front checks cause us to abort before we have written
++ * anything into the working directory. So when we exit this way the
++ * working directory remains clean.
++ *
++ * - The checks here happen in the middle of the action where we have
++ * already started to apply the patch. The end result will be a dirty
++ * working directory.
++ *
++ * Ideally, we should update the up-front checks to catch what would
++ * happen when we apply the patch before we damage the working tree.
++ * We have all the information necessary to do so. But for now, as a
++ * part of embargoed security work, having this check would serve as a
++ * reasonable first step.
++ */
++ if (path_is_beyond_symlink(state, path))
++ return error(_("affected file '%s' is beyond a symbolic link"), path);
++
+ res = try_create_file(state, path, mode, buf, size);
+ if (res < 0)
+ return -1;
+diff --git a/t/t4115-apply-symlink.sh b/t/t4115-apply-symlink.sh
+index 872fcda..1acb7b2 100755
+--- a/t/t4115-apply-symlink.sh
++++ b/t/t4115-apply-symlink.sh
+@@ -44,4 +44,85 @@ test_expect_success 'apply --index symlink patch' '
+
+ '
+
++test_expect_success 'symlink setup' '
++ ln -s .git symlink &&
++ git add symlink &&
++ git commit -m "add symlink"
++'
++
++test_expect_success SYMLINKS 'symlink escape when creating new files' '
++ test_when_finished "git reset --hard && git clean -dfx" &&
++
++ cat >patch <<-EOF &&
++ diff --git a/symlink b/renamed-symlink
++ similarity index 100%
++ rename from symlink
++ rename to renamed-symlink
++ --
++ diff --git /dev/null b/renamed-symlink/create-me
++ new file mode 100644
++ index 0000000..039727e
++ --- /dev/null
++ +++ b/renamed-symlink/create-me
++ @@ -0,0 +1,1 @@
++ +busted
++ EOF
++
++ test_must_fail git apply patch 2>stderr &&
++ cat >expected_stderr <<-EOF &&
++ error: affected file ${SQ}renamed-symlink/create-me${SQ} is beyond a symbolic link
++ EOF
++ test_cmp expected_stderr stderr &&
++ ! test_path_exists .git/create-me
++'
++
++test_expect_success SYMLINKS 'symlink escape when modifying file' '
++ test_when_finished "git reset --hard && git clean -dfx" &&
++ touch .git/modify-me &&
++
++ cat >patch <<-EOF &&
++ diff --git a/symlink b/renamed-symlink
++ similarity index 100%
++ rename from symlink
++ rename to renamed-symlink
++ --
++ diff --git a/renamed-symlink/modify-me b/renamed-symlink/modify-me
++ index 1111111..2222222 100644
++ --- a/renamed-symlink/modify-me
++ +++ b/renamed-symlink/modify-me
++ @@ -0,0 +1,1 @@
++ +busted
++ EOF
++
++ test_must_fail git apply patch 2>stderr &&
++ cat >expected_stderr <<-EOF &&
++ error: renamed-symlink/modify-me: No such file or directory
++ EOF
++ test_cmp expected_stderr stderr &&
++ test_must_be_empty .git/modify-me
++'
++
++test_expect_success SYMLINKS 'symlink escape when deleting file' '
++ test_when_finished "git reset --hard && git clean -dfx && rm .git/delete-me" &&
++ touch .git/delete-me &&
++
++ cat >patch <<-EOF &&
++ diff --git a/symlink b/renamed-symlink
++ similarity index 100%
++ rename from symlink
++ rename to renamed-symlink
++ --
++ diff --git a/renamed-symlink/delete-me b/renamed-symlink/delete-me
++ deleted file mode 100644
++ index 1111111..0000000 100644
++ EOF
++
++ test_must_fail git apply patch 2>stderr &&
++ cat >expected_stderr <<-EOF &&
++ error: renamed-symlink/delete-me: No such file or directory
++ EOF
++ test_cmp expected_stderr stderr &&
++ test_path_is_file .git/delete-me
++'
++
+ test_done
+--
+2.25.1
+
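The check added in `create_one_file()` above refuses to write through any symlink in the leading directories of the target path. The following is a hedged standalone Go sketch of the same idea; the helper name echoes `path_is_beyond_symlink` but is not git's implementation.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// pathIsBeyondSymlink reports whether any leading directory component of
// the relative path is a symbolic link, which would let a write escape
// the working tree.
func pathIsBeyondSymlink(rel string) (bool, error) {
	parts := strings.Split(filepath.ToSlash(filepath.Clean(rel)), "/")
	for i := 1; i < len(parts); i++ {
		prefix := filepath.Join(parts[:i]...)
		info, err := os.Lstat(prefix)
		if err != nil {
			if os.IsNotExist(err) {
				continue // component not created yet, nothing to follow
			}
			return false, err
		}
		if info.Mode()&os.ModeSymlink != 0 {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	_ = os.Mkdir("dir", 0o755)
	_ = os.Symlink("dir", "symlink")
	defer os.Remove("symlink")
	defer os.RemoveAll("dir")

	for _, p := range []string{"dir/file", "symlink/file"} {
		bad, err := pathIsBeyondSymlink(p)
		if err != nil {
			panic(err)
		}
		if bad {
			fmt.Printf("refusing to write %s: beyond a symbolic link\n", p)
		} else {
			fmt.Printf("%s is safe to create\n", p)
		}
	}
}
```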
diff --git a/meta/recipes-devtools/git/files/CVE-2023-25652.patch b/meta/recipes-devtools/git/files/CVE-2023-25652.patch
new file mode 100644
index 0000000000..d6b17a2b8a
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-25652.patch
@@ -0,0 +1,94 @@
+From 9db05711c98efc14f414d4c87135a34c13586e0b Mon Sep 17 00:00:00 2001
+From: Johannes Schindelin <johannes.schindelin@gmx.de>
+Date: Thu, 9 Mar 2023 16:02:54 +0100
+Subject: [PATCH] apply --reject: overwrite existing `.rej` symlink if it
+ exists
+
+The `git apply --reject` command is expected to write out `.rej` files
+when one or more hunks fail to apply cleanly. Historically, the command
+overwrites any existing `.rej` files. The idea is that apply/reject/edit
+cycles are relatively common, and the generated `.rej` files are not
+considered precious.
+
+But the command does not overwrite existing `.rej` symbolic links, and
+instead follows them. This is unsafe because the same patch could
+potentially create such a symbolic link and point at arbitrary paths
+outside the current worktree, and `git apply` would write the contents
+of the `.rej` file into that location.
+
+Therefore, let's make sure that any existing `.rej` file or symbolic
+link is removed before writing it.
+
+Reported-by: RyotaK <ryotak.mail@gmail.com>
+Helped-by: Taylor Blau <me@ttaylorr.com>
+Helped-by: Junio C Hamano <gitster@pobox.com>
+Helped-by: Linus Torvalds <torvalds@linuxfoundation.org>
+Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/9db05711c98efc14f414d4c87135a34c13586e0b]
+CVE: CVE-2023-25652
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ apply.c | 14 ++++++++++++--
+ t/t4115-apply-symlink.sh | 15 +++++++++++++++
+ 2 files changed, 27 insertions(+), 2 deletions(-)
+
+diff --git a/apply.c b/apply.c
+index 4f303bf..aa7111d 100644
+--- a/apply.c
++++ b/apply.c
+@@ -4531,7 +4531,7 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch)
+ FILE *rej;
+ char namebuf[PATH_MAX];
+ struct fragment *frag;
+- int cnt = 0;
++ int fd, cnt = 0;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (cnt = 0, frag = patch->fragments; frag; frag = frag->next) {
+@@ -4571,7 +4571,17 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch)
+ memcpy(namebuf, patch->new_name, cnt);
+ memcpy(namebuf + cnt, ".rej", 5);
+
+- rej = fopen(namebuf, "w");
++ fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666);
++ if (fd < 0) {
++ if (errno != EEXIST)
++ return error_errno(_("cannot open %s"), namebuf);
++ if (unlink(namebuf))
++ return error_errno(_("cannot unlink '%s'"), namebuf);
++ fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666);
++ if (fd < 0)
++ return error_errno(_("cannot open %s"), namebuf);
++ }
++ rej = fdopen(fd, "w");
+ if (!rej)
+ return error_errno(_("cannot open %s"), namebuf);
+
+diff --git a/t/t4115-apply-symlink.sh b/t/t4115-apply-symlink.sh
+index 1acb7b2..2b034ff 100755
+--- a/t/t4115-apply-symlink.sh
++++ b/t/t4115-apply-symlink.sh
+@@ -125,4 +125,19 @@ test_expect_success SYMLINKS 'symlink escape when deleting file' '
+ test_path_is_file .git/delete-me
+ '
+
++test_expect_success SYMLINKS '--reject removes .rej symlink if it exists' '
++ test_when_finished "git reset --hard && git clean -dfx" &&
++
++ test_commit file &&
++ echo modified >file.t &&
++ git diff -- file.t >patch &&
++ echo modified-again >file.t &&
++
++ ln -s foo file.t.rej &&
++ test_must_fail git apply patch --reject 2>err &&
++ test_i18ngrep "Rejected hunk" err &&
++ test_path_is_missing foo &&
++ test_path_is_file file.t.rej
++'
++
+ test_done
+--
+2.25.1
+
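The `.rej` handling above relies on the classic `O_CREAT | O_EXCL` pattern: creation fails rather than follows an existing entry, and on `EEXIST` the entry (file or symlink) is removed before creation is retried. Here is a small standalone Go sketch of that pattern; it is illustrative and not the git code.

```go
package main

import (
	"fmt"
	"os"
)

// createRejFile opens name for writing without ever following an existing
// symlink at that path: O_EXCL refuses to open an existing entry, and on
// "already exists" the entry is removed and creation is retried.
func createRejFile(name string) (*os.File, error) {
	flags := os.O_CREATE | os.O_EXCL | os.O_WRONLY
	f, err := os.OpenFile(name, flags, 0o666)
	if err == nil {
		return f, nil
	}
	if !os.IsExist(err) {
		return nil, err
	}
	if err := os.Remove(name); err != nil {
		return nil, err
	}
	return os.OpenFile(name, flags, 0o666)
}

func main() {
	// A pre-existing symlink at the .rej path points somewhere else entirely.
	_ = os.Symlink("/tmp/elsewhere", "file.t.rej")

	f, err := createRejFile("file.t.rej")
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(f, "rejected hunk goes here")
	f.Close()

	// The symlink was replaced by a regular file; nothing was written
	// through the old link target.
	info, _ := os.Lstat("file.t.rej")
	fmt.Println("file.t.rej mode:", info.Mode())
	os.Remove("file.t.rej")
}
```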
diff --git a/meta/recipes-devtools/git/files/CVE-2023-29007.patch b/meta/recipes-devtools/git/files/CVE-2023-29007.patch
new file mode 100644
index 0000000000..e166c01412
--- /dev/null
+++ b/meta/recipes-devtools/git/files/CVE-2023-29007.patch
@@ -0,0 +1,159 @@
+From 057c07a7b1fae22fdeef26c243f4cfbe3afc90ce Mon Sep 17 00:00:00 2001
+From: Taylor Blau <me@ttaylorr.com>
+Date: Fri, 14 Apr 2023 11:46:59 -0400
+Subject: [PATCH] Merge branch 'tb/config-copy-or-rename-in-file-injection'
+
+Avoids issues with renaming or deleting sections with long lines, where
+configuration values may be interpreted as sections, leading to
+configuration injection. Addresses CVE-2023-29007.
+
+* tb/config-copy-or-rename-in-file-injection:
+ config.c: disallow overly-long lines in `copy_or_rename_section_in_file()`
+ config.c: avoid integer truncation in `copy_or_rename_section_in_file()`
+ config: avoid fixed-sized buffer when renaming/deleting a section
+ t1300: demonstrate failure when renaming sections with long lines
+
+Signed-off-by: Taylor Blau <me@ttaylorr.com>
+
+Upstream-Status: Backport [https://github.com/git/git/commit/528290f8c61222433a8cf02fb7cfffa8438432b4]
+CVE: CVE-2023-29007
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ config.c | 36 +++++++++++++++++++++++++-----------
+ t/t1300-config.sh | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 55 insertions(+), 11 deletions(-)
+
+diff --git a/config.c b/config.c
+index e7052b3..676b687 100644
+--- a/config.c
++++ b/config.c
+@@ -2987,9 +2987,10 @@ void git_config_set_multivar(const char *key, const char *value,
+ multi_replace);
+ }
+
+-static int section_name_match (const char *buf, const char *name)
++static size_t section_name_match (const char *buf, const char *name)
+ {
+- int i = 0, j = 0, dot = 0;
++ size_t i = 0, j = 0;
++ int dot = 0;
+ if (buf[i] != '[')
+ return 0;
+ for (i = 1; buf[i] && buf[i] != ']'; i++) {
+@@ -3042,6 +3043,8 @@ static int section_name_is_ok(const char *name)
+ return 1;
+ }
+
++#define GIT_CONFIG_MAX_LINE_LEN (512 * 1024)
++
+ /* if new_name == NULL, the section is removed instead */
+ static int git_config_copy_or_rename_section_in_file(const char *config_filename,
+ const char *old_name,
+@@ -3051,11 +3054,12 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ char *filename_buf = NULL;
+ struct lock_file lock = LOCK_INIT;
+ int out_fd;
+- char buf[1024];
++ struct strbuf buf = STRBUF_INIT;
+ FILE *config_file = NULL;
+ struct stat st;
+ struct strbuf copystr = STRBUF_INIT;
+ struct config_store_data store;
++ uint32_t line_nr = 0;
+
+ memset(&store, 0, sizeof(store));
+
+@@ -3092,16 +3096,25 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ goto out;
+ }
+
+- while (fgets(buf, sizeof(buf), config_file)) {
+- int i;
+- int length;
++ while (!strbuf_getwholeline(&buf, config_file, '\n')) {
++ size_t i, length;
+ int is_section = 0;
+- char *output = buf;
+- for (i = 0; buf[i] && isspace(buf[i]); i++)
++ char *output = buf.buf;
++
++ line_nr++;
++
++ if (buf.len >= GIT_CONFIG_MAX_LINE_LEN) {
++ ret = error(_("refusing to work with overly long line "
++ "in '%s' on line %"PRIuMAX),
++ config_filename, (uintmax_t)line_nr);
++ goto out;
++ }
++
++ for (i = 0; buf.buf[i] && isspace(buf.buf[i]); i++)
+ ; /* do nothing */
+- if (buf[i] == '[') {
++ if (buf.buf[i] == '[') {
+ /* it's a section */
+- int offset;
++ size_t offset;
+ is_section = 1;
+
+ /*
+@@ -3118,7 +3131,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ strbuf_reset(&copystr);
+ }
+
+- offset = section_name_match(&buf[i], old_name);
++ offset = section_name_match(&buf.buf[i], old_name);
+ if (offset > 0) {
+ ret++;
+ if (new_name == NULL) {
+@@ -3193,6 +3206,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
+ out_no_rollback:
+ free(filename_buf);
+ config_store_data_clear(&store);
++ strbuf_release(&buf);
+ return ret;
+ }
+
+diff --git a/t/t1300-config.sh b/t/t1300-config.sh
+index 983a0a1..9b67f6b 100755
+--- a/t/t1300-config.sh
++++ b/t/t1300-config.sh
+@@ -616,6 +616,36 @@ test_expect_success 'renaming to bogus section is rejected' '
+ test_must_fail git config --rename-section branch.zwei "bogus name"
+ '
+
++test_expect_success 'renaming a section with a long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %1024s [a] e = f\\n" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ git config -f y --rename-section a xyz &&
++ test_must_fail git config -f y b.e
++'
++
++test_expect_success 'renaming an embedded section with a long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %1024s [a] [foo] e = f\\n" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ git config -f y --rename-section a xyz &&
++ test_must_fail git config -f y foo.e
++'
++
++test_expect_success 'renaming a section with an overly-long line' '
++ {
++ printf "[b]\\n" &&
++ printf " c = d %525000s e" " " &&
++ printf "[a] g = h\\n"
++ } >y &&
++ test_must_fail git config -f y --rename-section a xyz 2>err &&
++ test_i18ngrep "refusing to work with overly long line in .y. on line 2" err
++'
++
+ cat >> .git/config << EOF
+ [branch "zwei"] a = 1 [branch "vier"]
+ EOF
+--
+2.25.1
+
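The config rewrite above switches to length-checked line reading so that an overly long line can never be split and re-parsed as a new section. A rough standalone Go sketch of the same defensive bound follows; the limit mirrors the patch's `GIT_CONFIG_MAX_LINE_LEN`, while the function name and the rest of the scaffolding are illustrative only.

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
)

// Refuse to rewrite a config-style file once any single line exceeds a
// fixed limit, instead of silently truncating it into pieces that could
// be re-parsed as new sections.
const maxLineLen = 512 * 1024

func renameSectionSafe(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	r := bufio.NewReader(f)
	lineNr := 0
	for {
		line, err := r.ReadString('\n')
		if len(line) > 0 {
			lineNr++
			if len(line) >= maxLineLen {
				return fmt.Errorf("refusing to work with overly long line in %q on line %d", path, lineNr)
			}
			// ... section matching and rewriting would happen here ...
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}

func main() {
	long := "[b]\n\tc = d " + strings.Repeat(" ", maxLineLen) + " e\n"
	if err := os.WriteFile("y", []byte(long), 0o644); err != nil {
		panic(err)
	}
	defer os.Remove("y")
	fmt.Println(renameSectionSafe("y")) // reports the overly long line 2
}
```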
diff --git a/meta/recipes-devtools/git/git.inc b/meta/recipes-devtools/git/git.inc
index 879920d97e..e64472ea28 100644
--- a/meta/recipes-devtools/git/git.inc
+++ b/meta/recipes-devtools/git/git.inc
@@ -11,8 +11,26 @@ SRC_URI = "${KERNELORG_MIRROR}/software/scm/git/git-${PV}.tar.gz;name=tarball \
${KERNELORG_MIRROR}/software/scm/git/git-manpages-${PV}.tar.gz;name=manpages \
file://fixsort.patch \
file://CVE-2021-40330.patch \
+ file://CVE-2022-23521.patch \
+ file://CVE-2022-41903-01.patch \
+ file://CVE-2022-41903-02.patch \
+ file://CVE-2022-41903-03.patch \
+ file://CVE-2022-41903-04.patch \
+ file://CVE-2022-41903-05.patch \
+ file://CVE-2022-41903-06.patch \
+ file://CVE-2022-41903-07.patch \
+ file://CVE-2022-41903-08.patch \
+ file://CVE-2022-41903-09.patch \
+ file://CVE-2022-41903-10.patch \
+ file://CVE-2022-41903-11.patch \
+ file://CVE-2022-41903-12.patch \
+ file://CVE-2023-22490-1.patch \
+ file://CVE-2023-22490-2.patch \
+ file://CVE-2023-22490-3.patch \
+ file://CVE-2023-23946.patch \
+ file://CVE-2023-29007.patch \
+ file://CVE-2023-25652.patch \
"
-
S = "${WORKDIR}/git-${PV}"
LIC_FILES_CHKSUM = "file://COPYING;md5=7c0d7ef03a7eb04ce795b0f60e68e7e1"
@@ -22,7 +40,11 @@ CVE_PRODUCT = "git-scm:git"
# This is about a manpage not mentioning --mirror may "leak" information
# in mirrored git repos. Most OE users wouldn't build the docs and
# we don't see this as a major issue for our general users/usecases.
-CVE_CHECK_IGNORE += "CVE-2022-24975"
+CVE_CHECK_WHITELIST += "CVE-2022-24975"
+# This is specific to Git-for-Windows
+CVE_CHECK_WHITELIST += "CVE-2022-41953"
+# specific to Git for Windows
+CVE_CHECK_WHITELIST += "CVE-2023-22743"
PACKAGECONFIG ??= ""
PACKAGECONFIG[cvsserver] = ""
diff --git a/meta/recipes-devtools/go/go-1.14.inc b/meta/recipes-devtools/go/go-1.14.inc
index 08d547a837..9c7ceda891 100644
--- a/meta/recipes-devtools/go/go-1.14.inc
+++ b/meta/recipes-devtools/go/go-1.14.inc
@@ -22,6 +22,76 @@ SRC_URI += "\
file://CVE-2021-38297.patch \
file://CVE-2022-23806.patch \
file://CVE-2022-23772.patch \
+ file://CVE-2021-44717.patch \
+ file://CVE-2022-24675.patch \
+ file://CVE-2021-31525.patch \
+ file://CVE-2022-30629.patch \
+ file://CVE-2022-30631.patch \
+ file://CVE-2022-30632.patch \
+ file://CVE-2022-30633.patch \
+ file://CVE-2022-30635.patch \
+ file://CVE-2022-32148.patch \
+ file://CVE-2022-32189.patch \
+ file://CVE-2021-27918.patch \
+ file://CVE-2021-36221.patch \
+ file://CVE-2021-39293.patch \
+ file://CVE-2021-41771.patch \
+ file://CVE-2022-27664.patch \
+ file://0001-CVE-2022-32190.patch \
+ file://0002-CVE-2022-32190.patch \
+ file://0003-CVE-2022-32190.patch \
+ file://0004-CVE-2022-32190.patch \
+ file://CVE-2022-2880.patch \
+ file://CVE-2022-2879.patch \
+ file://CVE-2021-33195.patch \
+ file://CVE-2021-33198.patch \
+ file://CVE-2021-44716.patch \
+ file://CVE-2022-24921.patch \
+ file://CVE-2022-28131.patch \
+ file://CVE-2022-28327.patch \
+ file://CVE-2022-41715.patch \
+ file://CVE-2022-41717.patch \
+ file://CVE-2022-1962.patch \
+ file://CVE-2022-41723.patch \
+ file://CVE-2022-41722-1.patch \
+ file://CVE-2022-41722-2.patch \
+ file://CVE-2020-29510.patch \
+ file://CVE-2023-24537.patch \
+ file://CVE-2023-24534.patch \
+ file://CVE-2023-24538-1.patch \
+ file://CVE-2023-24538-2.patch \
+ file://CVE-2023-24538_3.patch \
+ file://CVE-2023-24538_4.patch \
+ file://CVE-2023-24538_5.patch \
+ file://CVE-2023-24538_6.patch \
+ file://CVE-2023-24539.patch \
+ file://CVE-2023-24540.patch \
+ file://CVE-2023-29405-1.patch \
+ file://CVE-2023-29405-2.patch \
+ file://CVE-2023-29402.patch \
+ file://CVE-2023-29404.patch \
+ file://CVE-2023-29400.patch \
+ file://CVE-2023-29406-1.patch \
+ file://CVE-2023-29406-2.patch \
+ file://CVE-2023-29409.patch \
+ file://CVE-2022-41725-pre1.patch \
+ file://CVE-2022-41725-pre2.patch \
+ file://CVE-2022-41725-pre3.patch \
+ file://CVE-2022-41725.patch \
+ file://CVE-2023-24536_1.patch \
+ file://CVE-2023-24536_2.patch \
+ file://CVE-2023-24536_3.patch \
+ file://CVE-2023-39318.patch \
+ file://CVE-2023-39319.patch \
+ file://CVE-2023-39326.patch \
+ file://CVE-2023-45287-pre1.patch \
+ file://CVE-2023-45287-pre2.patch \
+ file://CVE-2023-45287-pre3.patch \
+ file://CVE-2023-45287.patch \
+ file://CVE-2023-45289.patch \
+ file://CVE-2023-45290.patch \
+ file://CVE-2024-24785.patch \
+ file://CVE-2024-24784.patch \
"
SRC_URI_append_libc-musl = " file://0009-ld-replace-glibc-dynamic-linker-with-musl.patch"
@@ -32,3 +102,26 @@ SRC_URI[main.sha256sum] = "7ed13b2209e54a451835997f78035530b331c5b6943cdcd68a3d8
# https://github.com/golang/go/issues/30999#issuecomment-910470358
CVE_CHECK_WHITELIST += "CVE-2021-29923"
+# this issue affected go1.15 onwards
+# https://security-tracker.debian.org/tracker/CVE-2022-29526
+CVE_CHECK_WHITELIST += "CVE-2022-29526"
+
+# Issue only on windows
+CVE_CHECK_WHITELIST += "CVE-2022-29804"
+CVE_CHECK_WHITELIST += "CVE-2022-30580"
+CVE_CHECK_WHITELIST += "CVE-2022-30634"
+
+# Issue is in golang.org/x/net/html/parse.go, not used in go compiler
+CVE_CHECK_WHITELIST += "CVE-2021-33194"
+
+# Issue introduced in go1.16, does not exist in 1.14
+CVE_CHECK_WHITELIST += "CVE-2021-41772"
+
+# Fixes code that was added in go1.16, does not exist in 1.14
+CVE_CHECK_WHITELIST += "CVE-2022-30630"
+
+# This is specific to Microsoft Windows
+CVE_CHECK_WHITELIST += "CVE-2022-41716"
+
+# Issue introduced in go1.15beta1, does not exist in 1.14
+CVE_CHECK_WHITELIST += "CVE-2022-1705"
diff --git a/meta/recipes-devtools/go/go-1.14/0001-CVE-2022-32190.patch b/meta/recipes-devtools/go/go-1.14/0001-CVE-2022-32190.patch
new file mode 100644
index 0000000000..ad263b8023
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/0001-CVE-2022-32190.patch
@@ -0,0 +1,74 @@
+From 755f2dc35a19e6806de3ecbf836fa06ad875c67a Mon Sep 17 00:00:00 2001
+From: Carl Johnson <me@carlmjohnson.net>
+Date: Fri, 4 Mar 2022 14:49:52 +0000
+Subject: [PATCH 1/4] net/url: add JoinPath, URL.JoinPath
+
+Builds on CL 332209.
+
+Fixes #47005
+
+Change-Id: I82708dede05d79a196ca63f5a4e7cb5ac9a041ea
+GitHub-Last-Rev: 51b735066eef74f5e67c3e8899c58f44c0383c61
+GitHub-Pull-Request: golang/go#50383
+Reviewed-on: https://go-review.googlesource.com/c/go/+/374654
+Reviewed-by: Russ Cox <rsc@golang.org>
+Auto-Submit: Russ Cox <rsc@golang.org>
+Trust: Ian Lance Taylor <iant@golang.org>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Ian Lance Taylor <iant@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/604140d93111f89911e17cb147dcf6a02d2700d0]
+CVE: CVE-2022-32190
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/net/url/url.go | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/src/net/url/url.go b/src/net/url/url.go
+index 2880e82..dea8bfe 100644
+--- a/src/net/url/url.go
++++ b/src/net/url/url.go
+@@ -13,6 +13,7 @@ package url
+ import (
+ "errors"
+ "fmt"
++ "path"
+ "sort"
+ "strconv"
+ "strings"
+@@ -1104,6 +1105,17 @@ func (u *URL) UnmarshalBinary(text []byte) error {
+ return nil
+ }
+
++// JoinPath returns a new URL with the provided path elements joined to
++// any existing path and the resulting path cleaned of any ./ or ../ elements.
++func (u *URL) JoinPath(elem ...string) *URL {
++ url := *u
++ if len(elem) > 0 {
++ elem = append([]string{u.Path}, elem...)
++ url.setPath(path.Join(elem...))
++ }
++ return &url
++}
++
+ // validUserinfo reports whether s is a valid userinfo string per RFC 3986
+ // Section 3.2.1:
+ // userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
+@@ -1144,3 +1156,14 @@ func stringContainsCTLByte(s string) bool {
+ }
+ return false
+ }
++
++// JoinPath returns a URL string with the provided path elements joined to
++// the existing path of base and the resulting path cleaned of any ./ or ../ elements.
++func JoinPath(base string, elem ...string) (result string, err error) {
++ url, err := Parse(base)
++ if err != nil {
++ return
++ }
++ result = url.JoinPath(elem...).String()
++ return
++}
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/0002-CVE-2022-32190.patch b/meta/recipes-devtools/go/go-1.14/0002-CVE-2022-32190.patch
new file mode 100644
index 0000000000..1a11cc72bc
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/0002-CVE-2022-32190.patch
@@ -0,0 +1,48 @@
+From 985108de87e7d2ecb2b28cb53b323d530387b884 Mon Sep 17 00:00:00 2001
+From: Ian Lance Taylor <iant@golang.org>
+Date: Thu, 31 Mar 2022 13:21:39 -0700
+Subject: [PATCH 2/4] net/url: preserve a trailing slash in JoinPath
+
+Fixes #52074
+
+Change-Id: I30897f32e70a6ca0c4e11aaf07088c27336efaba
+Reviewed-on: https://go-review.googlesource.com/c/go/+/397256
+Trust: Ian Lance Taylor <iant@golang.org>
+Run-TryBot: Ian Lance Taylor <iant@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Matt Layher <mdlayher@gmail.com>
+Trust: Matt Layher <mdlayher@gmail.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/dbb52cc9f3e83a3040f46c2ae7650c15ab342179]
+CVE: CVE-2022-32190
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/net/url/url.go | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/src/net/url/url.go b/src/net/url/url.go
+index dea8bfe..3436707 100644
+--- a/src/net/url/url.go
++++ b/src/net/url/url.go
+@@ -1107,11 +1107,18 @@ func (u *URL) UnmarshalBinary(text []byte) error {
+
+ // JoinPath returns a new URL with the provided path elements joined to
+ // any existing path and the resulting path cleaned of any ./ or ../ elements.
++// Any sequences of multiple / characters will be reduced to a single /.
+ func (u *URL) JoinPath(elem ...string) *URL {
+ url := *u
+ if len(elem) > 0 {
+ elem = append([]string{u.Path}, elem...)
+- url.setPath(path.Join(elem...))
++ p := path.Join(elem...)
++ // path.Join will remove any trailing slashes.
++ // Preserve at least one.
++ if strings.HasSuffix(elem[len(elem)-1], "/") && !strings.HasSuffix(p, "/") {
++ p += "/"
++ }
++ url.setPath(p)
+ }
+ return &url
+ }
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/0003-CVE-2022-32190.patch b/meta/recipes-devtools/go/go-1.14/0003-CVE-2022-32190.patch
new file mode 100644
index 0000000000..816d914983
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/0003-CVE-2022-32190.patch
@@ -0,0 +1,36 @@
+From 2c632b883b0f11084cc247c8b50ad6c71fa7b447 Mon Sep 17 00:00:00 2001
+From: Sean Liao <sean@liao.dev>
+Date: Sat, 9 Jul 2022 18:38:45 +0100
+Subject: [PATCH 3/4] net/url: use EscapedPath for url.JoinPath
+
+Fixes #53763
+
+Change-Id: I08b53f159ebdce7907e8cc17316fd0c982363239
+Reviewed-on: https://go-review.googlesource.com/c/go/+/416774
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Bryan Mills <bcmills@google.com>
+Run-TryBot: Ian Lance Taylor <iant@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/bf5898ef53d1693aa572da0da746c05e9a6f15c5]
+CVE: CVE-2022-32190
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/net/url/url.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/net/url/url.go b/src/net/url/url.go
+index 3436707..73079a5 100644
+--- a/src/net/url/url.go
++++ b/src/net/url/url.go
+@@ -1111,7 +1111,7 @@ func (u *URL) UnmarshalBinary(text []byte) error {
+ func (u *URL) JoinPath(elem ...string) *URL {
+ url := *u
+ if len(elem) > 0 {
+- elem = append([]string{u.Path}, elem...)
++ elem = append([]string{u.EscapedPath()}, elem...)
+ p := path.Join(elem...)
+ // path.Join will remove any trailing slashes.
+ // Preserve at least one.
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/0004-CVE-2022-32190.patch b/meta/recipes-devtools/go/go-1.14/0004-CVE-2022-32190.patch
new file mode 100644
index 0000000000..4bdff3aed4
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/0004-CVE-2022-32190.patch
@@ -0,0 +1,82 @@
+From f61e428699cbb52bab31fe2c124f49d085a209fe Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Fri, 12 Aug 2022 16:21:09 -0700
+Subject: [PATCH 4/4] net/url: consistently remove ../ elements in JoinPath
+
+JoinPath would fail to remove relative elements from the start of
+the path when the first path element is "".
+
+In addition, JoinPath would return the original path unmodified
+when provided with no elements to join, violating the documented
+behavior of always cleaning the resulting path.
+
+Correct both these cases.
+
+ JoinPath("http://go.dev", "../go")
+ // before: http://go.dev/../go
+ // after: http://go.dev/go
+
+ JoinPath("http://go.dev/../go")
+ // before: http://go.dev/../go
+ // after: http://go.dev/go
+
+For #54385.
+Fixes #54635.
+Fixes CVE-2022-32190.
+
+Change-Id: I6d22cd160d097c50703dd96e4f453c6c118fd5d9
+Reviewed-on: https://go-review.googlesource.com/c/go/+/423514
+Reviewed-by: David Chase <drchase@google.com>
+Reviewed-by: Alan Donovan <adonovan@google.com>
+(cherry picked from commit 0765da5884adcc8b744979303a36a27092d8fc51)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/425357
+Run-TryBot: Damien Neil <dneil@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/28335508913a46e05ef0c04a18e8a1a6beb775ec]
+CVE: CVE-2022-32190
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/net/url/url.go | 26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/src/net/url/url.go b/src/net/url/url.go
+index 73079a5..1e8baf9 100644
+--- a/src/net/url/url.go
++++ b/src/net/url/url.go
+@@ -1109,17 +1109,23 @@ func (u *URL) UnmarshalBinary(text []byte) error {
+ // any existing path and the resulting path cleaned of any ./ or ../ elements.
+ // Any sequences of multiple / characters will be reduced to a single /.
+ func (u *URL) JoinPath(elem ...string) *URL {
+- url := *u
+- if len(elem) > 0 {
+- elem = append([]string{u.EscapedPath()}, elem...)
+- p := path.Join(elem...)
+- // path.Join will remove any trailing slashes.
+- // Preserve at least one.
+- if strings.HasSuffix(elem[len(elem)-1], "/") && !strings.HasSuffix(p, "/") {
+- p += "/"
+- }
+- url.setPath(p)
++ elem = append([]string{u.EscapedPath()}, elem...)
++ var p string
++ if !strings.HasPrefix(elem[0], "/") {
++ // Return a relative path if u is relative,
++ // but ensure that it contains no ../ elements.
++ elem[0] = "/" + elem[0]
++ p = path.Join(elem...)[1:]
++ } else {
++ p = path.Join(elem...)
+ }
++ // path.Join will remove any trailing slashes.
++ // Preserve at least one.
++ if strings.HasSuffix(elem[len(elem)-1], "/") && !strings.HasSuffix(p, "/") {
++ p += "/"
++ }
++ url := *u
++ url.setPath(p)
+ return &url
+ }
+
+--
+2.7.4
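Taken together, the four patches above give `net/url` a `JoinPath` that always cleans `./` and `../` elements and preserves a trailing slash. A quick usage sketch follows, assuming a toolchain whose `net/url` provides `JoinPath` (Go 1.19 and later, or a go-1.14 tree with this series applied); the example URLs are arbitrary.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "../" is cleaned even when it is the first element after the host.
	s, err := url.JoinPath("http://go.dev", "../go")
	fmt.Println(s, err) // http://go.dev/go <nil>

	// A trailing slash on the last element is preserved (patch 2 above).
	s, _ = url.JoinPath("https://example.com/api", "v1", "items/")
	fmt.Println(s) // https://example.com/api/v1/items/
}
```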
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2020-29510.patch b/meta/recipes-devtools/go/go-1.14/CVE-2020-29510.patch
new file mode 100644
index 0000000000..e1c9e0bdb9
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2020-29510.patch
@@ -0,0 +1,65 @@
+From a0bf4d38dc2057d28396594264bbdd43d412de22 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Tue, 27 Oct 2020 00:21:30 +0100
+Subject: [PATCH] encoding/xml: replace comments inside directives with a space
+
+A Directive (like <!ENTITY xxx []>) can't have other nodes nested inside
+it (in our data structure representation), so there is no way to
+preserve comments. The previous behavior was to just elide them, which
+however might change the semantic meaning of the surrounding markup.
+Instead, replace them with a space, which hopefully has the same semantic
+effect as the comment.
+
+Directives are not actually a node type in the XML spec, which instead
+specifies each of them separately (<!ENTITY, <!DOCTYPE, etc.), each with
+its own grammar. The rules for where and when the comments are allowed
+are not straightforward, and can't be implemented without implementing
+custom logic for each of the directives.
+
+Simply preserving the comments in the body of the directive would be
+problematic, as there can be unmatched quotes inside the comment.
+Whether those quotes are considered meaningful semantically or not,
+other parsers might disagree and interpret the output differently.
+
+This issue was reported by Juho Nurminen of Mattermost as it leads to
+round-trip mismatches. See #43168. It's not being fixed in a security
+release because round-trip stability is not a currently supported
+security property of encoding/xml, and we don't believe these fixes
+would be sufficient to reliably guarantee it in the future.
+
+Fixes CVE-2020-29510
+Updates #43168
+
+Change-Id: Icd86c75beff3e1e0689543efebdad10ed5178ce3
+Reviewed-on: https://go-review.googlesource.com/c/go/+/277893
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Trust: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/a9cfd55e2b09735a25976d1b008a0a3c767494f8
+CVE: CVE-2020-29510
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/encoding/xml/xml.go | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/encoding/xml/xml.go b/src/encoding/xml/xml.go
+index 01a1460..98647b2 100644
+--- a/src/encoding/xml/xml.go
++++ b/src/encoding/xml/xml.go
+@@ -768,6 +768,12 @@ func (d *Decoder) rawToken() (Token, error) {
+ }
+ b0, b1 = b1, b
+ }
++
++ // Replace the comment with a space in the returned Directive
++ // body, so that markup parts that were separated by the comment
++ // (like a "<" and a "!") don't get joined when re-encoding the
++ // Directive, taking new semantic meaning.
++ d.buf.WriteByte(' ')
+ }
+ }
+ return Directive(d.buf.Bytes()), nil
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-27918.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-27918.patch
new file mode 100644
index 0000000000..faa3f7f641
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-27918.patch
@@ -0,0 +1,191 @@
+From d0b79e3513a29628f3599dc8860666b6eed75372 Mon Sep 17 00:00:00 2001
+From: Katie Hockman <katie@golang.org>
+Date: Mon, 1 Mar 2021 09:54:00 -0500
+Subject: [PATCH] encoding/xml: prevent infinite loop while decoding
+
+This change properly handles a TokenReader which
+returns an EOF in the middle of an open XML
+element.
+
+Thanks to Sam Whited for reporting this.
+
+Fixes CVE-2021-27918
+Fixes #44913
+
+Change-Id: Id02a3f3def4a1b415fa2d9a8e3b373eb6cb0f433
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1004594
+Reviewed-by: Russ Cox <rsc@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Filippo Valsorda <valsorda@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/300391
+Trust: Katie Hockman <katie@golang.org>
+Run-TryBot: Katie Hockman <katie@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Alexander Rakoczy <alex@golang.org>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+
+https://github.com/golang/go/commit/d0b79e3513a29628f3599dc8860666b6eed75372
+CVE: CVE-2021-27918
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/encoding/xml/xml.go | 19 ++++---
+ src/encoding/xml/xml_test.go | 104 +++++++++++++++++++++++++++--------
+ 2 files changed, 92 insertions(+), 31 deletions(-)
+
+diff --git a/src/encoding/xml/xml.go b/src/encoding/xml/xml.go
+index adaf4daf198b9..6f9594d7ba7a3 100644
+--- a/src/encoding/xml/xml.go
++++ b/src/encoding/xml/xml.go
+@@ -271,7 +271,7 @@ func NewTokenDecoder(t TokenReader) *Decoder {
+ // it will return an error.
+ //
+ // Token implements XML name spaces as described by
+-// https://www.w3.org/TR/REC-xml-names/. Each of the
++// https://www.w3.org/TR/REC-xml-names/. Each of the
+ // Name structures contained in the Token has the Space
+ // set to the URL identifying its name space when known.
+ // If Token encounters an unrecognized name space prefix,
+@@ -285,16 +285,17 @@ func (d *Decoder) Token() (Token, error) {
+ if d.nextToken != nil {
+ t = d.nextToken
+ d.nextToken = nil
+- } else if t, err = d.rawToken(); err != nil {
+- switch {
+- case err == io.EOF && d.t != nil:
+- err = nil
+- case err == io.EOF && d.stk != nil && d.stk.kind != stkEOF:
+- err = d.syntaxError("unexpected EOF")
++ } else {
++ if t, err = d.rawToken(); t == nil && err != nil {
++ if err == io.EOF && d.stk != nil && d.stk.kind != stkEOF {
++ err = d.syntaxError("unexpected EOF")
++ }
++ return nil, err
+ }
+- return t, err
++ // We still have a token to process, so clear any
++ // errors (e.g. EOF) and proceed.
++ err = nil
+ }
+-
+ if !d.Strict {
+ if t1, ok := d.autoClose(t); ok {
+ d.nextToken = t
+diff --git a/src/encoding/xml/xml_test.go b/src/encoding/xml/xml_test.go
+index efddca43e9102..5672ebb375f0d 100644
+--- a/src/encoding/xml/xml_test.go
++++ b/src/encoding/xml/xml_test.go
+@@ -33,30 +33,90 @@ func (t *toks) Token() (Token, error) {
+
+ func TestDecodeEOF(t *testing.T) {
+ start := StartElement{Name: Name{Local: "test"}}
+- t.Run("EarlyEOF", func(t *testing.T) {
+- d := NewTokenDecoder(&toks{earlyEOF: true, t: []Token{
+- start,
+- start.End(),
+- }})
+- err := d.Decode(&struct {
+- XMLName Name `xml:"test"`
+- }{})
+- if err != nil {
+- t.Error(err)
++ tests := []struct {
++ name string
++ tokens []Token
++ ok bool
++ }{
++ {
++ name: "OK",
++ tokens: []Token{
++ start,
++ start.End(),
++ },
++ ok: true,
++ },
++ {
++ name: "Malformed",
++ tokens: []Token{
++ start,
++ StartElement{Name: Name{Local: "bad"}},
++ start.End(),
++ },
++ ok: false,
++ },
++ }
++ for _, tc := range tests {
++ for _, eof := range []bool{true, false} {
++ name := fmt.Sprintf("%s/earlyEOF=%v", tc.name, eof)
++ t.Run(name, func(t *testing.T) {
++ d := NewTokenDecoder(&toks{
++ earlyEOF: eof,
++ t: tc.tokens,
++ })
++ err := d.Decode(&struct {
++ XMLName Name `xml:"test"`
++ }{})
++ if tc.ok && err != nil {
++ t.Fatalf("d.Decode: expected nil error, got %v", err)
++ }
++ if _, ok := err.(*SyntaxError); !tc.ok && !ok {
++ t.Errorf("d.Decode: expected syntax error, got %v", err)
++ }
++ })
+ }
+- })
+- t.Run("LateEOF", func(t *testing.T) {
+- d := NewTokenDecoder(&toks{t: []Token{
+- start,
+- start.End(),
+- }})
+- err := d.Decode(&struct {
+- XMLName Name `xml:"test"`
+- }{})
+- if err != nil {
+- t.Error(err)
++ }
++}
++
++type toksNil struct {
++ returnEOF bool
++ t []Token
++}
++
++func (t *toksNil) Token() (Token, error) {
++ if len(t.t) == 0 {
++ if !t.returnEOF {
++ // Return nil, nil before returning an EOF. It's legal, but
++ // discouraged.
++ t.returnEOF = true
++ return nil, nil
+ }
+- })
++ return nil, io.EOF
++ }
++ var tok Token
++ tok, t.t = t.t[0], t.t[1:]
++ return tok, nil
++}
++
++func TestDecodeNilToken(t *testing.T) {
++ for _, strict := range []bool{true, false} {
++ name := fmt.Sprintf("Strict=%v", strict)
++ t.Run(name, func(t *testing.T) {
++ start := StartElement{Name: Name{Local: "test"}}
++ bad := StartElement{Name: Name{Local: "bad"}}
++ d := NewTokenDecoder(&toksNil{
++ // Malformed
++ t: []Token{start, bad, start.End()},
++ })
++ d.Strict = strict
++ err := d.Decode(&struct {
++ XMLName Name `xml:"test"`
++ }{})
++ if _, ok := err.(*SyntaxError); !ok {
++ t.Errorf("d.Decode: expected syntax error, got %v", err)
++ }
++ })
++ }
+ }
+
+ const testInput = `
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-31525.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-31525.patch
new file mode 100644
index 0000000000..afe4b0d2b8
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-31525.patch
@@ -0,0 +1,38 @@
+From efb465ada003d23353a91ef930be408eb575dba6 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 16 Jun 2022 17:40:12 +0530
+Subject: [PATCH] CVE-2021-31525
+
+Upstream-Status: Backport [https://github.com/argoheyard/lang-net/commit/701957006ef151feb43f86aa99c8a1f474f69282]
+CVE: CVE-2021-31525
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ src/vendor/golang.org/x/net/http/httpguts/httplex.go | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/src/vendor/golang.org/x/net/http/httpguts/httplex.go b/src/vendor/golang.org/x/net/http/httpguts/httplex.go
+index e7de24e..c79aa73 100644
+--- a/src/vendor/golang.org/x/net/http/httpguts/httplex.go
++++ b/src/vendor/golang.org/x/net/http/httpguts/httplex.go
+@@ -137,11 +137,13 @@ func trimOWS(x string) string {
+ // contains token amongst its comma-separated tokens, ASCII
+ // case-insensitively.
+ func headerValueContainsToken(v string, token string) bool {
+- v = trimOWS(v)
+- if comma := strings.IndexByte(v, ','); comma != -1 {
+- return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
++ for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
++ if tokenEqual(trimOWS(v[:comma]), token) {
++ return true
++ }
++ v = v[comma+1:]
+ }
+- return tokenEqual(v, token)
++ return tokenEqual(trimOWS(v), token)
+ }
+
+ // lowerASCII returns the ASCII lowercase version of b.
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-33195.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-33195.patch
new file mode 100644
index 0000000000..3d9de888ff
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-33195.patch
@@ -0,0 +1,373 @@
+From 9324d7e53151e9dfa4b25af994a28c2e0b11f729 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Thu, 27 May 2021 10:40:06 -0700
+Subject: [PATCH] net: verify results from Lookup* are valid domain names
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/31d60cda1f58b7558fc5725d2b9e4531655d980e]
+CVE: CVE-2021-33195
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+For the methods LookupCNAME, LookupSRV, LookupMX, LookupNS, and
+LookupAddr check that the returned domain names are in fact valid DNS
+names using the existing isDomainName function.
+
+Thanks to Philipp Jeitner and Haya Shulman from Fraunhofer SIT for
+reporting this issue.
+
+Updates #46241
+Fixes #46356
+Fixes CVE-2021-33195
+
+Change-Id: I47a4f58c031cb752f732e88bbdae7f819f0af4f3
+Reviewed-on: https://go-review.googlesource.com/c/go/+/323131
+Trust: Roland Shoemaker <roland@golang.org>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+(cherry picked from commit cdcd02842da7c004efd023881e3719105209c908)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/323269
+---
+ src/net/dnsclient_unix_test.go | 157 +++++++++++++++++++++++++++++++++
+ src/net/lookup.go | 111 ++++++++++++++++++++---
+ 2 files changed, 255 insertions(+), 13 deletions(-)
+
+diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go
+index 2ad40df..b8617d9 100644
+--- a/src/net/dnsclient_unix_test.go
++++ b/src/net/dnsclient_unix_test.go
+@@ -1800,3 +1800,160 @@ func TestPTRandNonPTR(t *testing.T) {
+ t.Errorf("names = %q; want %q", names, want)
+ }
+ }
++
++func TestCVE202133195(t *testing.T) {
++ fake := fakeDNSServer{
++ rh: func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) {
++ r := dnsmessage.Message{
++ Header: dnsmessage.Header{
++ ID: q.Header.ID,
++ Response: true,
++ RCode: dnsmessage.RCodeSuccess,
++ RecursionAvailable: true,
++ },
++ Questions: q.Questions,
++ }
++ switch q.Questions[0].Type {
++ case dnsmessage.TypeCNAME:
++ r.Answers = []dnsmessage.Resource{}
++ case dnsmessage.TypeA: // CNAME lookup uses a A/AAAA as a proxy
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: dnsmessage.MustNewName("<html>.golang.org."),
++ Type: dnsmessage.TypeA,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.AResource{
++ A: TestAddr,
++ },
++ },
++ )
++ case dnsmessage.TypeSRV:
++ n := q.Questions[0].Name
++ if n.String() == "_hdr._tcp.golang.org." {
++ n = dnsmessage.MustNewName("<html>.golang.org.")
++ }
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: n,
++ Type: dnsmessage.TypeSRV,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.SRVResource{
++ Target: dnsmessage.MustNewName("<html>.golang.org."),
++ },
++ },
++ )
++ case dnsmessage.TypeMX:
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: dnsmessage.MustNewName("<html>.golang.org."),
++ Type: dnsmessage.TypeMX,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.MXResource{
++ MX: dnsmessage.MustNewName("<html>.golang.org."),
++ },
++ },
++ )
++ case dnsmessage.TypeNS:
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: dnsmessage.MustNewName("<html>.golang.org."),
++ Type: dnsmessage.TypeNS,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.NSResource{
++ NS: dnsmessage.MustNewName("<html>.golang.org."),
++ },
++ },
++ )
++ case dnsmessage.TypePTR:
++ r.Answers = append(r.Answers,
++ dnsmessage.Resource{
++ Header: dnsmessage.ResourceHeader{
++ Name: dnsmessage.MustNewName("<html>.golang.org."),
++ Type: dnsmessage.TypePTR,
++ Class: dnsmessage.ClassINET,
++ Length: 4,
++ },
++ Body: &dnsmessage.PTRResource{
++ PTR: dnsmessage.MustNewName("<html>.golang.org."),
++ },
++ },
++ )
++ }
++ return r, nil
++ },
++ }
++
++ r := Resolver{PreferGo: true, Dial: fake.DialContext}
++ // Change the default resolver to match our manipulated resolver
++ originalDefault := DefaultResolver
++ DefaultResolver = &r
++ defer func() {
++ DefaultResolver = originalDefault
++ }()
++
++ _, err := r.LookupCNAME(context.Background(), "golang.org")
++ if expected := "lookup golang.org: CNAME target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupCNAME returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, err = LookupCNAME("golang.org")
++ if expected := "lookup golang.org: CNAME target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupCNAME returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, _, err = r.LookupSRV(context.Background(), "target", "tcp", "golang.org")
++ if expected := "lookup golang.org: SRV target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, _, err = LookupSRV("target", "tcp", "golang.org")
++ if expected := "lookup golang.org: SRV target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, _, err = r.LookupSRV(context.Background(), "hdr", "tcp", "golang.org")
++ if expected := "lookup golang.org: SRV header name is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, _, err = LookupSRV("hdr", "tcp", "golang.org")
++ if expected := "lookup golang.org: SRV header name is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupSRV returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, err = r.LookupMX(context.Background(), "golang.org")
++ if expected := "lookup golang.org: MX target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupMX returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, err = LookupMX("golang.org")
++ if expected := "lookup golang.org: MX target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupMX returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, err = r.LookupNS(context.Background(), "golang.org")
++ if expected := "lookup golang.org: NS target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupNS returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, err = LookupNS("golang.org")
++ if expected := "lookup golang.org: NS target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupNS returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++
++ _, err = r.LookupAddr(context.Background(), "1.2.3.4")
++ if expected := "lookup 1.2.3.4: PTR target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("Resolver.LookupAddr returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++ _, err = LookupAddr("1.2.3.4")
++ if expected := "lookup 1.2.3.4: PTR target is invalid"; err == nil || err.Error() != expected {
++ t.Errorf("LookupAddr returned unexpected error, got %q, want %q", err.Error(), expected)
++ }
++}
+diff --git a/src/net/lookup.go b/src/net/lookup.go
+index 9cebd10..05e88e4 100644
+--- a/src/net/lookup.go
++++ b/src/net/lookup.go
+@@ -364,8 +364,11 @@ func (r *Resolver) LookupPort(ctx context.Context, network, service string) (por
+ // LookupCNAME does not return an error if host does not
+ // contain DNS "CNAME" records, as long as host resolves to
+ // address records.
++//
++// The returned canonical name is validated to be a properly
++// formatted presentation-format domain name.
+ func LookupCNAME(host string) (cname string, err error) {
+- return DefaultResolver.lookupCNAME(context.Background(), host)
++ return DefaultResolver.LookupCNAME(context.Background(), host)
+ }
+
+ // LookupCNAME returns the canonical name for the given host.
+@@ -378,8 +381,18 @@ func LookupCNAME(host string) (cname string, err error) {
+ // LookupCNAME does not return an error if host does not
+ // contain DNS "CNAME" records, as long as host resolves to
+ // address records.
+-func (r *Resolver) LookupCNAME(ctx context.Context, host string) (cname string, err error) {
+- return r.lookupCNAME(ctx, host)
++//
++// The returned canonical name is validated to be a properly
++// formatted presentation-format domain name.
++func (r *Resolver) LookupCNAME(ctx context.Context, host string) (string, error) {
++ cname, err := r.lookupCNAME(ctx, host)
++ if err != nil {
++ return "", err
++ }
++ if !isDomainName(cname) {
++ return "", &DNSError{Err: "CNAME target is invalid", Name: host}
++ }
++ return cname, nil
+ }
+
+ // LookupSRV tries to resolve an SRV query of the given service,
+@@ -391,8 +404,11 @@ func (r *Resolver) LookupCNAME(ctx context.Context, host string) (cname string,
+ // That is, it looks up _service._proto.name. To accommodate services
+ // publishing SRV records under non-standard names, if both service
+ // and proto are empty strings, LookupSRV looks up name directly.
++//
++// The returned service names are validated to be properly
++// formatted presentation-format domain names.
+ func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err error) {
+- return DefaultResolver.lookupSRV(context.Background(), service, proto, name)
++ return DefaultResolver.LookupSRV(context.Background(), service, proto, name)
+ }
+
+ // LookupSRV tries to resolve an SRV query of the given service,
+@@ -404,28 +420,82 @@ func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err err
+ // That is, it looks up _service._proto.name. To accommodate services
+ // publishing SRV records under non-standard names, if both service
+ // and proto are empty strings, LookupSRV looks up name directly.
+-func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*SRV, err error) {
+- return r.lookupSRV(ctx, service, proto, name)
++//
++// The returned service names are validated to be properly
++// formatted presentation-format domain names.
++func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
++ cname, addrs, err := r.lookupSRV(ctx, service, proto, name)
++ if err != nil {
++ return "", nil, err
++ }
++ if cname != "" && !isDomainName(cname) {
++ return "", nil, &DNSError{Err: "SRV header name is invalid", Name: name}
++ }
++ for _, addr := range addrs {
++ if addr == nil {
++ continue
++ }
++ if !isDomainName(addr.Target) {
++ return "", nil, &DNSError{Err: "SRV target is invalid", Name: name}
++ }
++ }
++ return cname, addrs, nil
+ }
+
+ // LookupMX returns the DNS MX records for the given domain name sorted by preference.
++//
++// The returned mail server names are validated to be properly
++// formatted presentation-format domain names.
+ func LookupMX(name string) ([]*MX, error) {
+- return DefaultResolver.lookupMX(context.Background(), name)
++ return DefaultResolver.LookupMX(context.Background(), name)
+ }
+
+ // LookupMX returns the DNS MX records for the given domain name sorted by preference.
++//
++// The returned mail server names are validated to be properly
++// formatted presentation-format domain names.
+ func (r *Resolver) LookupMX(ctx context.Context, name string) ([]*MX, error) {
+- return r.lookupMX(ctx, name)
++ records, err := r.lookupMX(ctx, name)
++ if err != nil {
++ return nil, err
++ }
++ for _, mx := range records {
++ if mx == nil {
++ continue
++ }
++ if !isDomainName(mx.Host) {
++ return nil, &DNSError{Err: "MX target is invalid", Name: name}
++ }
++ }
++ return records, nil
+ }
+
+ // LookupNS returns the DNS NS records for the given domain name.
++//
++// The returned name server names are validated to be properly
++// formatted presentation-format domain names.
+ func LookupNS(name string) ([]*NS, error) {
+- return DefaultResolver.lookupNS(context.Background(), name)
++ return DefaultResolver.LookupNS(context.Background(), name)
+ }
+
+ // LookupNS returns the DNS NS records for the given domain name.
++//
++// The returned name server names are validated to be properly
++// formatted presentation-format domain names.
+ func (r *Resolver) LookupNS(ctx context.Context, name string) ([]*NS, error) {
+- return r.lookupNS(ctx, name)
++ records, err := r.lookupNS(ctx, name)
++ if err != nil {
++ return nil, err
++ }
++ for _, ns := range records {
++ if ns == nil {
++ continue
++ }
++ if !isDomainName(ns.Host) {
++ return nil, &DNSError{Err: "NS target is invalid", Name: name}
++ }
++ }
++ return records, nil
+ }
+
+ // LookupTXT returns the DNS TXT records for the given domain name.
+@@ -441,14 +511,29 @@ func (r *Resolver) LookupTXT(ctx context.Context, name string) ([]string, error)
+ // LookupAddr performs a reverse lookup for the given address, returning a list
+ // of names mapping to that address.
+ //
++// The returned names are validated to be properly formatted presentation-format
++// domain names.
++//
+ // When using the host C library resolver, at most one result will be
+ // returned. To bypass the host resolver, use a custom Resolver.
+ func LookupAddr(addr string) (names []string, err error) {
+- return DefaultResolver.lookupAddr(context.Background(), addr)
++ return DefaultResolver.LookupAddr(context.Background(), addr)
+ }
+
+ // LookupAddr performs a reverse lookup for the given address, returning a list
+ // of names mapping to that address.
+-func (r *Resolver) LookupAddr(ctx context.Context, addr string) (names []string, err error) {
+- return r.lookupAddr(ctx, addr)
++//
++// The returned names are validated to be properly formatted presentation-format
++// domain names.
++func (r *Resolver) LookupAddr(ctx context.Context, addr string) ([]string, error) {
++ names, err := r.lookupAddr(ctx, addr)
++ if err != nil {
++ return nil, err
++ }
++ for _, name := range names {
++ if !isDomainName(name) {
++ return nil, &DNSError{Err: "PTR target is invalid", Name: addr}
++ }
++ }
++ return names, nil
+ }
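The validation added above is visible to callers of the public lookup functions: answers whose names are not well-formed presentation-format domain names now come back as a DNSError instead of being passed through. A minimal sketch of how calling code observes this on a patched toolchain (the host name and the error handling are illustrative, not part of the patch):

package main

import (
	"context"
	"errors"
	"fmt"
	"net"
)

func main() {
	r := &net.Resolver{PreferGo: true} // the pure-Go resolver paths patched above
	cname, err := r.LookupCNAME(context.Background(), "golang.org")
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) {
		// With the fix, a malicious answer such as "<html>.golang.org."
		// is rejected here with "CNAME target is invalid" rather than
		// being handed back to the caller unvalidated.
		fmt.Println("rejected:", dnsErr.Err)
		return
	}
	fmt.Println("canonical name:", cname)
}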
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-33198.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-33198.patch
new file mode 100644
index 0000000000..241c08dad7
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-33198.patch
@@ -0,0 +1,113 @@
+From c8866491ac424cdf39aedb325e6dec9e54418cfb Mon Sep 17 00:00:00 2001
+From: Robert Griesemer <gri@golang.org>
+Date: Sun, 2 May 2021 11:27:03 -0700
+Subject: [PATCH] math/big: check for excessive exponents in Rat.SetString
+
+CVE-2021-33198
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/df9ce19db6df32d94eae8760927bdfbc595433c3]
+CVE: CVE-2021-33198
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+Found by OSS-Fuzz https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=33284
+
+Thanks to Emmanuel Odeke for reporting this issue.
+
+Updates #45910
+Fixes #46305
+Fixes CVE-2021-33198
+
+Change-Id: I61e7b04dbd80343420b57eede439e361c0f7b79c
+Reviewed-on: https://go-review.googlesource.com/c/go/+/316149
+Trust: Robert Griesemer <gri@golang.org>
+Trust: Katie Hockman <katie@golang.org>
+Run-TryBot: Robert Griesemer <gri@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+Reviewed-by: Emmanuel Odeke <emmanuel@orijtech.com>
+(cherry picked from commit 6c591f79b0b5327549bd4e94970f7a279efb4ab0)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/321831
+Run-TryBot: Katie Hockman <katie@golang.org>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+---
+ src/math/big/ratconv.go | 15 ++++++++-------
+ src/math/big/ratconv_test.go | 25 +++++++++++++++++++++++++
+ 2 files changed, 33 insertions(+), 7 deletions(-)
+
+diff --git a/src/math/big/ratconv.go b/src/math/big/ratconv.go
+index e8cbdbe..90053a9 100644
+--- a/src/math/big/ratconv.go
++++ b/src/math/big/ratconv.go
+@@ -51,7 +51,8 @@ func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
+ // An optional base-10 ``e'' or base-2 ``p'' (or their upper-case variants)
+ // exponent may be provided as well, except for hexadecimal floats which
+ // only accept an (optional) ``p'' exponent (because an ``e'' or ``E'' cannot
+-// be distinguished from a mantissa digit).
++// be distinguished from a mantissa digit). If the exponent's absolute value
++// is too large, the operation may fail.
+ // The entire string, not just a prefix, must be valid for success. If the
+ // operation failed, the value of z is undefined but the returned value is nil.
+ func (z *Rat) SetString(s string) (*Rat, bool) {
+@@ -174,6 +175,9 @@ func (z *Rat) SetString(s string) (*Rat, bool) {
+ return nil, false
+ }
+ }
++ if n > 1e6 {
++ return nil, false // avoid excessively large exponents
++ }
+ pow5 := z.b.abs.expNN(natFive, nat(nil).setWord(Word(n)), nil) // use underlying array of z.b.abs
+ if exp5 > 0 {
+ z.a.abs = z.a.abs.mul(z.a.abs, pow5)
+@@ -186,15 +190,12 @@ func (z *Rat) SetString(s string) (*Rat, bool) {
+ }
+
+ // apply exp2 contributions
++ if exp2 < -1e7 || exp2 > 1e7 {
++ return nil, false // avoid excessively large exponents
++ }
+ if exp2 > 0 {
+- if int64(uint(exp2)) != exp2 {
+- panic("exponent too large")
+- }
+ z.a.abs = z.a.abs.shl(z.a.abs, uint(exp2))
+ } else if exp2 < 0 {
+- if int64(uint(-exp2)) != -exp2 {
+- panic("exponent too large")
+- }
+ z.b.abs = z.b.abs.shl(z.b.abs, uint(-exp2))
+ }
+
+diff --git a/src/math/big/ratconv_test.go b/src/math/big/ratconv_test.go
+index b820df4..e55e655 100644
+--- a/src/math/big/ratconv_test.go
++++ b/src/math/big/ratconv_test.go
+@@ -590,3 +590,28 @@ func TestIssue31184(t *testing.T) {
+ }
+ }
+ }
++
++func TestIssue45910(t *testing.T) {
++ var x Rat
++ for _, test := range []struct {
++ input string
++ want bool
++ }{
++ {"1e-1000001", false},
++ {"1e-1000000", true},
++ {"1e+1000000", true},
++ {"1e+1000001", false},
++
++ {"0p1000000000000", true},
++ {"1p-10000001", false},
++ {"1p-10000000", true},
++ {"1p+10000000", true},
++ {"1p+10000001", false},
++ {"1.770p02041010010011001001", false}, // test case from issue
++ } {
++ _, got := x.SetString(test.input)
++ if got != test.want {
++ t.Errorf("SetString(%s) got ok = %v; want %v", test.input, got, test.want)
++ }
++ }
++}
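The two limits introduced above (roughly 1e6 for decimal exponents and 1e7 for binary ones) turn pathological inputs into an ordinary parse failure. A small sketch whose inputs straddle the limits the same way TestIssue45910 does (everything else here is illustrative):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	for _, s := range []string{"1e+1000000", "1e+1000001", "1p+10000000", "1p+10000001"} {
		// With the fix, the oversized exponents make SetString report
		// ok == false instead of trying to build an enormous number.
		_, ok := new(big.Rat).SetString(s)
		fmt.Printf("SetString(%q) ok=%v\n", s, ok)
	}
}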
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-36221.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-36221.patch
new file mode 100644
index 0000000000..9c00d4ebb2
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-36221.patch
@@ -0,0 +1,101 @@
+From b7a85e0003cedb1b48a1fd3ae5b746ec6330102e Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 7 Jul 2021 16:34:34 -0700
+Subject: [PATCH] net/http/httputil: close incoming ReverseProxy request body
+
+Reading from an incoming request body after the request handler aborts
+with a panic can cause a panic, because http.Server does not (contrary
+to its documentation) close the request body in this case.
+
+Always close the incoming request body in ReverseProxy.ServeHTTP to
+ensure that any in-flight outgoing requests using the body do not
+read from it.
+
+Updates #46866
+Fixes CVE-2021-36221
+
+Change-Id: I310df269200ad8732c5d9f1a2b00de68725831df
+Reviewed-on: https://go-review.googlesource.com/c/go/+/333191
+Trust: Damien Neil <dneil@google.com>
+Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+
+https://github.com/golang/go/commit/b7a85e0003cedb1b48a1fd3ae5b746ec6330102e
+CVE: CVE-2021-36221
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/net/http/httputil/reverseproxy.go | 9 +++++
+ src/net/http/httputil/reverseproxy_test.go | 39 ++++++++++++++++++++++
+ 2 files changed, 48 insertions(+)
+
+diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go
+index 5d39955d62d15..8b63368386f43 100644
+--- a/src/net/http/httputil/reverseproxy.go
++++ b/src/net/http/httputil/reverseproxy.go
+@@ -235,6 +235,15 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if req.ContentLength == 0 {
+ outreq.Body = nil // Issue 16036: nil Body for http.Transport retries
+ }
++ if outreq.Body != nil {
++ // Reading from the request body after returning from a handler is not
++ // allowed, and the RoundTrip goroutine that reads the Body can outlive
++ // this handler. This can lead to a crash if the handler panics (see
++ // Issue 46866). Although calling Close doesn't guarantee there isn't
++ // any Read in flight after the handler returns, in practice it's safe to
++ // read after closing it.
++ defer outreq.Body.Close()
++ }
+ if outreq.Header == nil {
+ outreq.Header = make(http.Header) // Issue 33142: historical behavior was to always allocate
+ }
+diff --git a/src/net/http/httputil/reverseproxy_test.go b/src/net/http/httputil/reverseproxy_test.go
+index 1898ed8b8afde..4b6ad77a29466 100644
+--- a/src/net/http/httputil/reverseproxy_test.go
++++ b/src/net/http/httputil/reverseproxy_test.go
+@@ -1122,6 +1122,45 @@ func TestReverseProxy_PanicBodyError(t *testing.T) {
+ rproxy.ServeHTTP(httptest.NewRecorder(), req)
+ }
+
++// Issue #46866: panic without closing incoming request body causes a panic
++func TestReverseProxy_PanicClosesIncomingBody(t *testing.T) {
++ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++ out := "this call was relayed by the reverse proxy"
++ // Coerce a wrong content length to induce io.ErrUnexpectedEOF
++ w.Header().Set("Content-Length", fmt.Sprintf("%d", len(out)*2))
++ fmt.Fprintln(w, out)
++ }))
++ defer backend.Close()
++ backendURL, err := url.Parse(backend.URL)
++ if err != nil {
++ t.Fatal(err)
++ }
++ proxyHandler := NewSingleHostReverseProxy(backendURL)
++ proxyHandler.ErrorLog = log.New(io.Discard, "", 0) // quiet for tests
++ frontend := httptest.NewServer(proxyHandler)
++ defer frontend.Close()
++ frontendClient := frontend.Client()
++
++ var wg sync.WaitGroup
++ for i := 0; i < 2; i++ {
++ wg.Add(1)
++ go func() {
++ defer wg.Done()
++ for j := 0; j < 10; j++ {
++ const reqLen = 6 * 1024 * 1024
++ req, _ := http.NewRequest("POST", frontend.URL, &io.LimitedReader{R: neverEnding('x'), N: reqLen})
++ req.ContentLength = reqLen
++ resp, _ := frontendClient.Transport.RoundTrip(req)
++ if resp != nil {
++ io.Copy(io.Discard, resp.Body)
++ resp.Body.Close()
++ }
++ }
++ }()
++ }
++ wg.Wait()
++}
++
+ func TestSelectFlushInterval(t *testing.T) {
+ tests := []struct {
+ name string
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-39293.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-39293.patch
new file mode 100644
index 0000000000..88fca9cad9
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-39293.patch
@@ -0,0 +1,79 @@
+From 6c480017ae600b2c90a264a922e041df04dfa785 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Wed, 18 Aug 2021 11:49:29 -0700
+Subject: [PATCH] [release-branch.go1.16] archive/zip: prevent preallocation
+ check from overflowing
+
+If the indicated directory size in the archive header is so large that
+subtracting it from the archive size overflows a uint64, the check that
+the indicated number of files in the archive can be effectively
+bypassed. Prevent this from happening by checking that the indicated
+directory size is less than the size of the archive.
+
+Thanks to the OSS-Fuzz project for discovering this issue and to
+Emmanuel Odeke for reporting it.
+
+Fixes #47985
+Updates #47801
+Fixes CVE-2021-39293
+
+Change-Id: Ifade26b98a40f3b37398ca86bd5252d12394dd24
+Reviewed-on: https://go-review.googlesource.com/c/go/+/343434
+Trust: Roland Shoemaker <roland@golang.org>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Russ Cox <rsc@golang.org>
+(cherry picked from commit bacbc33439b124ffd7392c91a5f5d96eca8c0c0b)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/345409
+Reviewed-by: Emmanuel Odeke <emmanuel@orijtech.com>
+Run-TryBot: Emmanuel Odeke <emmanuel@orijtech.com>
+Trust: Cherry Mui <cherryyz@google.com>
+
+https://github.com/golang/go/commit/6c480017ae600b2c90a264a922e041df04dfa785
+CVE: CVE-2021-39293
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/archive/zip/reader.go | 2 +-
+ src/archive/zip/reader_test.go | 18 ++++++++++++++++++
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go
+index ddef2b7b5a517..801d1313b6c32 100644
+--- a/src/archive/zip/reader.go
++++ b/src/archive/zip/reader.go
+@@ -105,7 +105,7 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
+ // indicate it contains up to 1 << 128 - 1 files. Since each file has a
+ // header which will be _at least_ 30 bytes we can safely preallocate
+ // if (data size / 30) >= end.directoryRecords.
+- if (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
++ if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
+ z.File = make([]*File, 0, end.directoryRecords)
+ }
+ z.Comment = end.comment
+diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
+index 471be27bb1004..99f13345d8d06 100644
+--- a/src/archive/zip/reader_test.go
++++ b/src/archive/zip/reader_test.go
+@@ -1225,3 +1225,21 @@ func TestCVE202133196(t *testing.T) {
+ t.Errorf("Archive has unexpected number of files, got %d, want 5", len(r.File))
+ }
+ }
++
++func TestCVE202139293(t *testing.T) {
++ // directory size is so large that the check in Reader.init
++ // overflows when subtracting from the archive size, causing
++ // the pre-allocation check to be bypassed.
++ data := []byte{
++ 0x50, 0x4b, 0x06, 0x06, 0x05, 0x06, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b,
++ 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
++ 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b,
++ 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
++ 0x00, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x31, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
++ 0xff, 0x50, 0xfe, 0x00, 0xff, 0x00, 0x3a, 0x00, 0x00, 0x00, 0xff,
++ }
++ _, err := NewReader(bytes.NewReader(data), int64(len(data)))
++ if err != ErrFormat {
++ t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
++ }
++}
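The wrap-around the commit message describes is plain unsigned arithmetic and easy to reproduce in isolation. A tiny sketch with made-up sizes (the numbers are illustrative and not taken from the crafted archive in the test above):

package main

import "fmt"

func main() {
	size := uint64(100)              // actual archive size
	directorySize := uint64(1 << 40) // attacker-controlled value from the end-of-central-directory record
	directoryRecords := uint64(1 << 30)

	// Old check: the subtraction wraps around to a huge value, so the
	// guard passes and a gigantic File slice would be preallocated.
	if (size-directorySize)/30 >= directoryRecords {
		fmt.Println("old check passes; wrapped value:", size-directorySize)
	}

	// New check: the extra comparison rejects a directory size larger
	// than the archive itself, so the wrap-around can never happen.
	if directorySize < size && (size-directorySize)/30 >= directoryRecords {
		fmt.Println("new check passes")
	} else {
		fmt.Println("new check rejects the bogus directory size")
	}
}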
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-41771.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-41771.patch
new file mode 100644
index 0000000000..526796dbcb
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-41771.patch
@@ -0,0 +1,86 @@
+From d19c5bdb24e093a2d5097b7623284eb02726cede Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Thu, 14 Oct 2021 13:02:01 -0700
+Subject: [PATCH] [release-branch.go1.16] debug/macho: fail on invalid dynamic
+ symbol table command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fail out when loading a file that contains a dynamic symbol table
+command that indicates a larger number of symbols than exist in the
+loaded symbol table.
+
+Thanks to Burak Çarıkçı - Yunus Yıldırım (CT-Zer0 Crypttech) for
+reporting this issue.
+
+Updates #48990
+Fixes #48991
+Fixes CVE-2021-41771
+
+Change-Id: Ic3d6e6529241afcc959544b326b21b663262bad5
+Reviewed-on: https://go-review.googlesource.com/c/go/+/355990
+Reviewed-by: Julie Qiu <julie@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+Reviewed-by: Emmanuel Odeke <emmanuel@orijtech.com>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Trust: Katie Hockman <katie@golang.org>
+(cherry picked from commit 61536ec03063b4951163bd09609c86d82631fa27)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/359454
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+
+https://github.com/golang/go/commit/d19c5bdb24e093a2d5097b7623284eb02726cede
+CVE: CVE-2021-41771
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/debug/macho/file.go | 9 +++++++++
+ src/debug/macho/file_test.go | 7 +++++++
+ .../testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64 | 1 +
+ 3 files changed, 17 insertions(+)
+ create mode 100644 src/debug/macho/testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64
+
+diff --git a/src/debug/macho/file.go b/src/debug/macho/file.go
+index 085b0c8219bad..73cfce3c7606e 100644
+--- a/src/debug/macho/file.go
++++ b/src/debug/macho/file.go
+@@ -345,6 +345,15 @@ func NewFile(r io.ReaderAt) (*File, error) {
+ if err := binary.Read(b, bo, &hdr); err != nil {
+ return nil, err
+ }
++ if hdr.Iundefsym > uint32(len(f.Symtab.Syms)) {
++ return nil, &FormatError{offset, fmt.Sprintf(
++ "undefined symbols index in dynamic symbol table command is greater than symbol table length (%d > %d)",
++ hdr.Iundefsym, len(f.Symtab.Syms)), nil}
++ } else if hdr.Iundefsym+hdr.Nundefsym > uint32(len(f.Symtab.Syms)) {
++ return nil, &FormatError{offset, fmt.Sprintf(
++ "number of undefined symbols after index in dynamic symbol table command is greater than symbol table length (%d > %d)",
++ hdr.Iundefsym+hdr.Nundefsym, len(f.Symtab.Syms)), nil}
++ }
+ dat := make([]byte, hdr.Nindirectsyms*4)
+ if _, err := r.ReadAt(dat, int64(hdr.Indirectsymoff)); err != nil {
+ return nil, err
+diff --git a/src/debug/macho/file_test.go b/src/debug/macho/file_test.go
+index 03915c86e23d9..9beeb80dd27c1 100644
+--- a/src/debug/macho/file_test.go
++++ b/src/debug/macho/file_test.go
+@@ -416,3 +416,10 @@ func TestTypeString(t *testing.T) {
+ t.Errorf("got %v, want %v", TypeExec.GoString(), "macho.Exec")
+ }
+ }
++
++func TestOpenBadDysymCmd(t *testing.T) {
++ _, err := openObscured("testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64")
++ if err == nil {
++ t.Fatal("openObscured did not fail when opening a file with an invalid dynamic symbol table command")
++ }
++}
+diff --git a/src/debug/macho/testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64 b/src/debug/macho/testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64
+new file mode 100644
+index 0000000000000..8e0436639c109
+--- /dev/null
++++ b/src/debug/macho/testdata/gcc-amd64-darwin-exec-with-bad-dysym.base64
+@@ -0,0 +1 @@
++z/rt/gcAAAEDAACAAgAAAAsAAABoBQAAhQAAAAAAAAAZAAAASAAAAF9fUEFHRVpFUk8AAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAAAA2AEAAF9fVEVYVAAAAAAAAAAAAAAAAAAAAQAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAcAAAAFAAAABQAAAAAAAABfX3RleHQAAAAAAAAAAAAAX19URVhUAAAAAAAAAAAAABQPAAABAAAAbQAAAAAAAAAUDwAAAgAAAAAAAAAAAAAAAAQAgAAAAAAAAAAAAAAAAF9fc3ltYm9sX3N0dWIxAABfX1RFWFQAAAAAAAAAAAAAgQ8AAAEAAAAMAAAAAAAAAIEPAAAAAAAAAAAAAAAAAAAIBACAAAAAAAYAAAAAAAAAX19zdHViX2hlbHBlcgAAAF9fVEVYVAAAAAAAAAAAAACQDwAAAQAAABgAAAAAAAAAkA8AAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABfX2NzdHJpbmcAAAAAAAAAX19URVhUAAAAAAAAAAAAAKgPAAABAAAADQAAAAAAAACoDwAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAF9fZWhfZnJhbWUAAAAAAABfX1RFWFQAAAAAAAAAAAAAuA8AAAEAAABIAAAAAAAAALgPAAADAAAAAAAAAAAAAAALAABgAAAAAAAAAAAAAAAAGQAAADgBAABfX0RBVEEAAAAAAAAAAAAAABAAAAEAAAAAEAAAAAAAAAAQAAAAAAAAABAAAAAAAAAHAAAAAwAAAAMAAAAAAAAAX19kYXRhAAAAAAAAAAAAAF9fREFUQQAAAAAAAAAAAAAAEAAAAQAAABwAAAAAAAAAABAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABfX2R5bGQAAAAAAAAAAAAAX19EQVRBAAAAAAAAAAAAACAQAAABAAAAOAAAAAAAAAAgEAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9fbGFfc3ltYm9sX3B0cgBfX0RBVEEAAAAAAAAAAAAAWBAAAAEAAAAQAAAAAAAAAFgQAAACAAAAAAAAAAAAAAAHAAAAAgAAAAAAAAAAAAAAGQAAAEgAAABfX0xJTktFRElUAAAAAAAAACAAAAEAAAAAEAAAAAAAAAAgAAAAAAAAQAEAAAAAAAAHAAAAAQAAAAAAAAAAAAAAAgAAABgAAAAAIAAACwAAAMAgAACAAAAACwAAAFAAAAAAAAAAAgAAAAIAAAAHAAAACQAAAP8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwIAAABAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAIAAAAAwAAAAvdXNyL2xpYi9keWxkAAAAAAAAABsAAAAYAAAAOyS4cg5FdtQoqu6JsMEhXQUAAAC4AAAABAAAACoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQPAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAOAAAABgAAAACAAAAAAABAAAAAQAvdXNyL2xpYi9saWJnY2Nfcy4xLmR5bGliAAAAAAAAAAwAAAA4AAAAGAAAAAIAAAAEAW8AAAABAC91c3IvbGliL2xpYlN5c3RlbS5CLmR5bGliAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABqAEiJ5UiD5PBIi30ISI11EIn6g8IBweIDSAHySInR6wRIg8EISIM5AHX2SIPBCOgiAAAAicfoMgAAAPRBU0yNHafw//9BU/8lvwAAAA8fAP8lvgAAAFVIieVIjT0zAAAA6A0AAAC4AAAAAMnD/yXRAAAA/yXTAAAAAAAATI0dwQAAAOm0////TI0dvQAAAOmo////aGVsbG8sIHdvcmxkAAAAABQAAAAAAAAAAXpSAAF4EAEQDAcIkAEAACwAAAAcAAAAkv////////8XAAAAAAAAAAAEAQAAAA4QhgIEAwAAAA0GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDAX/9/AAAIEMBf/38AAAAAAAABAAAAGBAAAAEAAAAQEAAAAQAAAAgQAAABAAAAABAAAAEAAACQDwAAAQAAAJwPAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAHgEAAFAPAAABAAAAGwAAAB4BAABkDwAAAQAAAC4AAAAPBgAAGBAAAAEAAAA2AAAADwYAABAQAAABAAAAPgAAAA8GAAAAEAAAAQAAAEoAAAADABAAAAAAAAEAAABeAAAADwYAAAgQAAABAAAAZwAAAA8BAABqDwAAAQAAAG0AAAAPAQAAFA8AAAEAAABzAAAAAQABAgAAAAAAAAAAeQAAAAEAAQIAAAAAAAAAAAkAAAAKAAAACQAAAAoAAAAgAGR5bGRfc3R1Yl9iaW5kaW5nX2hlbHBlcgBfX2R5bGRfZnVuY19sb29rdXAAX05YQXJnYwBfTlhBcmd2AF9fX3Byb2duYW1lAF9fbWhfZXhlY3V0ZV9oZWFkZXIAX2Vudmlyb24AX21haW4Ac3RhcnQAX2V4aXQAX3B1dHMAAA==
+\ No newline at end of file
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-44716.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-44716.patch
new file mode 100644
index 0000000000..9c4fee2db4
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-44716.patch
@@ -0,0 +1,93 @@
+From 9f1860075990e7bf908ca7cc329d1d3ef91741c8 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Thu, 9 Dec 2021 06:13:31 -0500
+Subject: [PATCH] net/http: update bundled golang.org/x/net/http2
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/d0aebe3e74fe14799f97ddd3f01129697c6a290a]
+CVE: CVE-2021-44716
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+Pull in security fix
+
+ a5309b3 http2: cap the size of the server's canonical header cache
+
+Updates #50058
+Fixes CVE-2021-44716
+
+Change-Id: Ifdd13f97fce168de5fb4b2e74ef2060d059800b9
+Reviewed-on: https://go-review.googlesource.com/c/go/+/370575
+Trust: Filippo Valsorda <filippo@golang.org>
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Alex Rakoczy <alex@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+(cherry picked from commit d0aebe3e74fe14799f97ddd3f01129697c6a290a)
+---
+ src/go.mod | 2 +-
+ src/go.sum | 4 ++--
+ src/net/http/h2_bundle.go | 10 +++++++++-
+ src/vendor/modules.txt | 2 +-
+ 4 files changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/src/go.mod b/src/go.mod
+index ec6bd98..56f2fbb 100644
+--- a/src/go.mod
++++ b/src/go.mod
+@@ -4,7 +4,7 @@ go 1.14
+
+ require (
+ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d
+- golang.org/x/net v0.0.0-20210129194117-4acb7895a057
++ golang.org/x/net v0.0.0-20211209100217-a5309b321dca
+ golang.org/x/sys v0.0.0-20200201011859-915c9c3d4ccf // indirect
+ golang.org/x/text v0.3.3-0.20191031172631-4b67af870c6f // indirect
+ )
+diff --git a/src/go.sum b/src/go.sum
+index 171e083..1ceba05 100644
+--- a/src/go.sum
++++ b/src/go.sum
+@@ -2,8 +2,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
+ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d h1:9FCpayM9Egr1baVnV1SX0H87m+XB0B8S0hAMi99X/3U=
+ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+-golang.org/x/net v0.0.0-20210129194117-4acb7895a057 h1:HThQeV5c0Ab/Puir+q6mC97b7+3dfZdsLWMLoBrzo68=
+-golang.org/x/net v0.0.0-20210129194117-4acb7895a057/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
++golang.org/x/net v0.0.0-20211209100217-a5309b321dca h1:UmeWAm8AwB6NA/e4FSaGlK1EKTLXKX3utx4Si+6kfPg=
++golang.org/x/net v0.0.0-20211209100217-a5309b321dca/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20200201011859-915c9c3d4ccf h1:+4j7oujXP478CVb/AFvHJmVX5+Pczx2NGts5yirA0oY=
+diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
+index 702fd5a..83f2a72 100644
+--- a/src/net/http/h2_bundle.go
++++ b/src/net/http/h2_bundle.go
+@@ -4293,7 +4293,15 @@ func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = CanonicalHeaderKey(v)
+- sc.canonHeader[v] = cv
++ // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
++ // entries in the canonHeader cache. This should be larger than the number
++ // of unique, uncommon header keys likely to be sent by the peer, while not
++ // so high as to permit unreasonable memory usage if the peer sends an unbounded
++ // number of unique header keys.
++ const maxCachedCanonicalHeaders = 32
++ if len(sc.canonHeader) < maxCachedCanonicalHeaders {
++ sc.canonHeader[v] = cv
++ }
+ return cv
+ }
+
+diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt
+index 669bd9b..1d67183 100644
+--- a/src/vendor/modules.txt
++++ b/src/vendor/modules.txt
+@@ -8,7 +8,7 @@ golang.org/x/crypto/curve25519
+ golang.org/x/crypto/hkdf
+ golang.org/x/crypto/internal/subtle
+ golang.org/x/crypto/poly1305
+-# golang.org/x/net v0.0.0-20210129194117-4acb7895a057
++# golang.org/x/net v0.0.0-20211209100217-a5309b321dca
+ ## explicit
+ golang.org/x/net/dns/dnsmessage
+ golang.org/x/net/http/httpguts
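The h2_bundle.go hunk above is a bounded-cache pattern: keep serving canonicalised keys, but stop remembering new ones once the map reaches a fixed size. A standalone sketch of the same idea (the name canonicalHeader and the limit of 32 come from the hunk; the surrounding type and main function are illustrative):

package main

import (
	"fmt"
	"net/http"
)

const maxCachedCanonicalHeaders = 32

type headerCache struct {
	canon map[string]string
}

// canonicalHeader returns the canonical form of v, caching results but
// refusing to grow the cache beyond maxCachedCanonicalHeaders, so a peer
// sending endless unique header keys cannot inflate memory without bound.
func (c *headerCache) canonicalHeader(v string) string {
	if cv, ok := c.canon[v]; ok {
		return cv
	}
	if c.canon == nil {
		c.canon = make(map[string]string)
	}
	cv := http.CanonicalHeaderKey(v)
	if len(c.canon) < maxCachedCanonicalHeaders {
		c.canon[v] = cv
	}
	return cv
}

func main() {
	var c headerCache
	for i := 0; i < 1000; i++ {
		c.canonicalHeader(fmt.Sprintf("x-made-up-header-%d", i))
	}
	fmt.Println("cached entries:", len(c.canon)) // stays at 32
}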
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2021-44717.patch b/meta/recipes-devtools/go/go-1.14/CVE-2021-44717.patch
new file mode 100644
index 0000000000..17cac7a5ba
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2021-44717.patch
@@ -0,0 +1,83 @@
+From 9171c664e7af479aa26bc72f2e7cf4e69d8e0a6f Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 17 Jun 2022 10:22:47 +0530
+Subject: [PATCH] CVE-2021-44717
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/44a3fb49]
+CVE: CVE-2021-44717
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+syscall: fix ForkLock spurious close(0) on pipe failure
+Pipe (and therefore forkLockPipe) does not make any guarantees
+about the state of p after a failed Pipe(p). Avoid that assumption
+and the too-clever goto, so that we don't accidentally Close a real fd
+if the failed pipe leaves p[0] or p[1] set >= 0.
+
+Updates #50057
+Fixes CVE-2021-44717
+
+Change-Id: Iff8e19a6efbba0c73cc8b13ecfae381c87600bb4
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1291270
+Reviewed-by: Ian Lance Taylor <iant@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/370514
+Trust: Filippo Valsorda <filippo@golang.org>
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Alex Rakoczy <alex@golang.org>
+---
+ src/syscall/exec_unix.go | 20 ++++++--------------
+ 1 file changed, 6 insertions(+), 14 deletions(-)
+
+diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go
+index b3798b6..b73782c 100644
+--- a/src/syscall/exec_unix.go
++++ b/src/syscall/exec_unix.go
+@@ -151,9 +151,6 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
+ sys = &zeroSysProcAttr
+ }
+
+- p[0] = -1
+- p[1] = -1
+-
+ // Convert args to C form.
+ argv0p, err := BytePtrFromString(argv0)
+ if err != nil {
+@@ -194,14 +191,17 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
+
+ // Allocate child status pipe close on exec.
+ if err = forkExecPipe(p[:]); err != nil {
+- goto error
++ ForkLock.Unlock()
++ return 0, err
+ }
+
+ // Kick off child.
+ pid, err1 = forkAndExecInChild(argv0p, argvp, envvp, chroot, dir, attr, sys, p[1])
+ if err1 != 0 {
+- err = Errno(err1)
+- goto error
++ Close(p[0])
++ Close(p[1])
++ ForkLock.Unlock()
++ return 0, Errno(err1)
+ }
+ ForkLock.Unlock()
+
+@@ -228,14 +228,6 @@ func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
+
+ // Read got EOF, so pipe closed on exec, so exec succeeded.
+ return pid, nil
+-
+-error:
+- if p[0] >= 0 {
+- Close(p[0])
+- Close(p[1])
+- }
+- ForkLock.Unlock()
+- return 0, err
+ }
+
+ // Combination of fork and exec, careful to be thread safe.
+--
+2.25.1
+
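The spurious close(0) the commit message refers to comes from the zero value of the pipe array: if Pipe fails without touching p, both entries are still 0, and the removed error path would have closed file descriptor 0 (stdin). A hypothetical sketch of that situation (the failure is only simulated here; everything below is illustrative):

package main

import "fmt"

func main() {
	var p [2]int // zero value: p[0] == 0, p[1] == 0, i.e. both refer to stdin

	pipeFailed := true // pretend forkExecPipe(p[:]) returned an error without setting p

	// The removed error path only checked p[0] >= 0 before closing, which a
	// zero-valued array satisfies, so it would have closed fd 0 twice.
	if pipeFailed && p[0] >= 0 {
		fmt.Printf("old error path would Close(%d) and Close(%d)\n", p[0], p[1])
	}

	// The fix instead returns the error immediately after unlocking ForkLock,
	// without touching whatever happens to be in p.
}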
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-1962.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-1962.patch
new file mode 100644
index 0000000000..b2ab5d0669
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-1962.patch
@@ -0,0 +1,357 @@
+From ba8788ebcead55e99e631c6a1157ad7b35535d11 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Wed, 15 Jun 2022 10:43:05 -0700
+Subject: [PATCH] [release-branch.go1.17] go/parser: limit recursion depth
+
+Limit nested parsing to 100,000, which prevents stack exhaustion when
+parsing deeply nested statements, types, and expressions. Also limit
+the scope depth to 1,000 during object resolution.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #53707
+Updates #53616
+Fixes CVE-2022-1962
+
+Change-Id: I4d7b86c1d75d0bf3c7af1fdea91582aa74272c64
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1491025
+Reviewed-by: Russ Cox <rsc@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 6a856f08d58e4b6705c0c337d461c540c1235c83)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/417070
+Reviewed-by: Heschi Kreinick <heschi@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/ba8788ebcead55e99e631c6a1157ad7b35535d11]
+CVE: CVE-2022-1962
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/go/parser/interface.go | 10 ++-
+ src/go/parser/parser.go | 48 ++++++++--
+ src/go/parser/parser_test.go | 169 +++++++++++++++++++++++++++++++++++
+ 3 files changed, 220 insertions(+), 7 deletions(-)
+
+diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go
+index 54f9d7b..537b327 100644
+--- a/src/go/parser/interface.go
++++ b/src/go/parser/interface.go
+@@ -92,8 +92,11 @@ func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode)
+ defer func() {
+ if e := recover(); e != nil {
+ // resume same panic if it's not a bailout
+- if _, ok := e.(bailout); !ok {
++ bail, ok := e.(bailout)
++ if !ok {
+ panic(e)
++ } else if bail.msg != "" {
++ p.errors.Add(p.file.Position(bail.pos), bail.msg)
+ }
+ }
+
+@@ -188,8 +191,11 @@ func ParseExprFrom(fset *token.FileSet, filename string, src interface{}, mode M
+ defer func() {
+ if e := recover(); e != nil {
+ // resume same panic if it's not a bailout
+- if _, ok := e.(bailout); !ok {
++ bail, ok := e.(bailout)
++ if !ok {
+ panic(e)
++ } else if bail.msg != "" {
++ p.errors.Add(p.file.Position(bail.pos), bail.msg)
+ }
+ }
+ p.errors.Sort()
+diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
+index 31a7398..586fe90 100644
+--- a/src/go/parser/parser.go
++++ b/src/go/parser/parser.go
+@@ -64,6 +64,10 @@ type parser struct {
+ unresolved []*ast.Ident // unresolved identifiers
+ imports []*ast.ImportSpec // list of imports
+
++ // nestLev is used to track and limit the recursion depth
++ // during parsing.
++ nestLev int
++
+ // Label scopes
+ // (maintained by open/close LabelScope)
+ labelScope *ast.Scope // label scope for current function
+@@ -236,6 +240,24 @@ func un(p *parser) {
+ p.printTrace(")")
+ }
+
++// maxNestLev is the deepest we're willing to recurse during parsing
++const maxNestLev int = 1e5
++
++func incNestLev(p *parser) *parser {
++ p.nestLev++
++ if p.nestLev > maxNestLev {
++ p.error(p.pos, "exceeded max nesting depth")
++ panic(bailout{})
++ }
++ return p
++}
++
++// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
++// It is used along with incNestLev in a similar fashion to how un and trace are used.
++func decNestLev(p *parser) {
++ p.nestLev--
++}
++
+ // Advance to the next token.
+ func (p *parser) next0() {
+ // Because of one-token look-ahead, print the previous token
+@@ -348,8 +370,12 @@ func (p *parser) next() {
+ }
+ }
+
+-// A bailout panic is raised to indicate early termination.
+-type bailout struct{}
++// A bailout panic is raised to indicate early termination. pos and msg are
++// only populated when bailing out of object resolution.
++type bailout struct {
++ pos token.Pos
++ msg string
++}
+
+ func (p *parser) error(pos token.Pos, msg string) {
+ epos := p.file.Position(pos)
+@@ -1030,6 +1056,8 @@ func (p *parser) parseChanType() *ast.ChanType {
+
+ // If the result is an identifier, it is not resolved.
+ func (p *parser) tryIdentOrType() ast.Expr {
++ defer decNestLev(incNestLev(p))
++
+ switch p.tok {
+ case token.IDENT:
+ return p.parseTypeName()
+@@ -1609,7 +1637,13 @@ func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
+ }
+
+ x := p.parseUnaryExpr(lhs)
+- for {
++ // We track the nesting here rather than at the entry for the function,
++ // since it can iteratively produce a nested output, and we want to
++ // limit how deep a structure we generate.
++ var n int
++ defer func() { p.nestLev -= n }()
++ for n = 1; ; n++ {
++ incNestLev(p)
+ op, oprec := p.tokPrec()
+ if oprec < prec1 {
+ return x
+@@ -1628,7 +1662,7 @@ func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
+ // The result may be a type or even a raw type ([...]int). Callers must
+ // check the result (using checkExpr or checkExprOrType), depending on
+ // context.
+-func (p *parser) parseExpr(lhs bool) ast.Expr {
++func (p *parser) parseExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Expression"))
+ }
+@@ -1899,6 +1933,8 @@ func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
+ }
+
+ func (p *parser) parseIfStmt() *ast.IfStmt {
++ defer decNestLev(incNestLev(p))
++
+ if p.trace {
+ defer un(trace(p, "IfStmt"))
+ }
+@@ -2214,6 +2250,8 @@ func (p *parser) parseForStmt() ast.Stmt {
+ }
+
+ func (p *parser) parseStmt() (s ast.Stmt) {
++ defer decNestLev(incNestLev(p))
++
+ if p.trace {
+ defer un(trace(p, "Statement"))
+ }
+diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
+index 25a374e..37a6a2b 100644
+--- a/src/go/parser/parser_test.go
++++ b/src/go/parser/parser_test.go
+@@ -10,6 +10,7 @@ import (
+ "go/ast"
+ "go/token"
+ "os"
++ "runtime"
+ "strings"
+ "testing"
+ )
+@@ -569,3 +570,171 @@ type x int // comment
+ t.Errorf("got %q, want %q", comment, "// comment")
+ }
+ }
++
++var parseDepthTests = []struct {
++ name string
++ format string
++ // multiplier is used when a single statement may result in more than one
++ // change in the depth level, for instance "1+(..." produces a BinaryExpr
++ // followed by a UnaryExpr, which increments the depth twice. The test
++ // case comment explains which nodes are triggering the multiple depth
++ // changes.
++ parseMultiplier int
++ // scope is true if we should also test the statement for the resolver scope
++ // depth limit.
++ scope bool
++ // scopeMultiplier does the same as parseMultiplier, but for the scope
++ // depths.
++ scopeMultiplier int
++}{
++ // The format expands the part inside « » many times.
++ // A second set of brackets nested inside the first stops the repetition,
++ // so that for example «(«1»)» expands to (((...((((1))))...))).
++ {name: "array", format: "package main; var x «[1]»int"},
++ {name: "slice", format: "package main; var x «[]»int"},
++ {name: "struct", format: "package main; var x «struct { X «int» }»", scope: true},
++ {name: "pointer", format: "package main; var x «*»int"},
++ {name: "func", format: "package main; var x «func()»int", scope: true},
++ {name: "chan", format: "package main; var x «chan »int"},
++ {name: "chan2", format: "package main; var x «<-chan »int"},
++ {name: "interface", format: "package main; var x «interface { M() «int» }»", scope: true, scopeMultiplier: 2}, // Scopes: InterfaceType, FuncType
++ {name: "map", format: "package main; var x «map[int]»int"},
++ {name: "slicelit", format: "package main; var x = «[]any{«»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
++ {name: "arraylit", format: "package main; var x = «[1]any{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
++ {name: "structlit", format: "package main; var x = «struct{x any}{«nil»}»", parseMultiplier: 2}, // Parser nodes: UnaryExpr, CompositeLit
++ {name: "maplit", format: "package main; var x = «map[int]any{1:«nil»}»", parseMultiplier: 2}, // Parser nodes: CompositeLit, KeyValueExpr
++ {name: "dot", format: "package main; var x = «x.»x"},
++ {name: "index", format: "package main; var x = x«[1]»"},
++ {name: "slice", format: "package main; var x = x«[1:2]»"},
++ {name: "slice3", format: "package main; var x = x«[1:2:3]»"},
++ {name: "dottype", format: "package main; var x = x«.(any)»"},
++ {name: "callseq", format: "package main; var x = x«()»"},
++ {name: "methseq", format: "package main; var x = x«.m()»", parseMultiplier: 2}, // Parser nodes: SelectorExpr, CallExpr
++ {name: "binary", format: "package main; var x = «1+»1"},
++ {name: "binaryparen", format: "package main; var x = «1+(«1»)»", parseMultiplier: 2}, // Parser nodes: BinaryExpr, ParenExpr
++ {name: "unary", format: "package main; var x = «^»1"},
++ {name: "addr", format: "package main; var x = «& »x"},
++ {name: "star", format: "package main; var x = «*»x"},
++ {name: "recv", format: "package main; var x = «<-»x"},
++ {name: "call", format: "package main; var x = «f(«1»)»", parseMultiplier: 2}, // Parser nodes: Ident, CallExpr
++ {name: "conv", format: "package main; var x = «(*T)(«1»)»", parseMultiplier: 2}, // Parser nodes: ParenExpr, CallExpr
++ {name: "label", format: "package main; func main() { «Label:» }"},
++ {name: "if", format: "package main; func main() { «if true { «» }»}", parseMultiplier: 2, scope: true, scopeMultiplier: 2}, // Parser nodes: IfStmt, BlockStmt. Scopes: IfStmt, BlockStmt
++ {name: "ifelse", format: "package main; func main() { «if true {} else » {} }", scope: true},
++ {name: "switch", format: "package main; func main() { «switch { default: «» }»}", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause
++ {name: "typeswitch", format: "package main; func main() { «switch x.(type) { default: «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: TypeSwitchStmt, CaseClause
++ {name: "for0", format: "package main; func main() { «for { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
++ {name: "for1", format: "package main; func main() { «for x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
++ {name: "for3", format: "package main; func main() { «for f(); g(); h() { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: ForStmt, BlockStmt
++ {name: "forrange0", format: "package main; func main() { «for range x { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
++ {name: "forrange1", format: "package main; func main() { «for x = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
++ {name: "forrange2", format: "package main; func main() { «for x, y = range z { «» }» }", scope: true, scopeMultiplier: 2}, // Scopes: RangeStmt, BlockStmt
++ {name: "go", format: "package main; func main() { «go func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: GoStmt, FuncLit
++ {name: "defer", format: "package main; func main() { «defer func() { «» }()» }", parseMultiplier: 2, scope: true}, // Parser nodes: DeferStmt, FuncLit
++ {name: "select", format: "package main; func main() { «select { default: «» }» }", scope: true},
++}
++
++// split splits pre«mid»post into pre, mid, post.
++// If the string does not have that form, split returns x, "", "".
++func split(x string) (pre, mid, post string) {
++ start, end := strings.Index(x, "«"), strings.LastIndex(x, "»")
++ if start < 0 || end < 0 {
++ return x, "", ""
++ }
++ return x[:start], x[start+len("«") : end], x[end+len("»"):]
++}
++
++func TestParseDepthLimit(t *testing.T) {
++ if runtime.GOARCH == "wasm" {
++ t.Skip("causes call stack exhaustion on js/wasm")
++ }
++ for _, tt := range parseDepthTests {
++ for _, size := range []string{"small", "big"} {
++ t.Run(tt.name+"/"+size, func(t *testing.T) {
++ n := maxNestLev + 1
++ if tt.parseMultiplier > 0 {
++ n /= tt.parseMultiplier
++ }
++ if size == "small" {
++ // Decrease the number of statements by 10, in order to check
++ // that we do not fail when under the limit. 10 is used to
++ // provide some wiggle room for cases where the surrounding
++ // scaffolding syntax adds some noise to the depth that changes
++ // on a per testcase basis.
++ n -= 10
++ }
++
++ pre, mid, post := split(tt.format)
++ if strings.Contains(mid, "«") {
++ left, base, right := split(mid)
++ mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
++ } else {
++ mid = strings.Repeat(mid, n)
++ }
++ input := pre + mid + post
++
++ fset := token.NewFileSet()
++ _, err := ParseFile(fset, "", input, ParseComments|SkipObjectResolution)
++ if size == "small" {
++ if err != nil {
++ t.Errorf("ParseFile(...): %v (want success)", err)
++ }
++ } else {
++ expected := "exceeded max nesting depth"
++ if err == nil || !strings.HasSuffix(err.Error(), expected) {
++ t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
++ }
++ }
++ })
++ }
++ }
++}
++
++func TestScopeDepthLimit(t *testing.T) {
++ if runtime.GOARCH == "wasm" {
++ t.Skip("causes call stack exhaustion on js/wasm")
++ }
++ for _, tt := range parseDepthTests {
++ if !tt.scope {
++ continue
++ }
++ for _, size := range []string{"small", "big"} {
++ t.Run(tt.name+"/"+size, func(t *testing.T) {
++ n := maxScopeDepth + 1
++ if tt.scopeMultiplier > 0 {
++ n /= tt.scopeMultiplier
++ }
++ if size == "small" {
++ // Decrease the number of statements by 10, in order to check
++ // that we do not fail when under the limit. 10 is used to
++ // provide some wiggle room for cases where the surrounding
++ // scaffolding syntax adds some noise to the depth that changes
++ // on a per testcase basis.
++ n -= 10
++ }
++
++ pre, mid, post := split(tt.format)
++ if strings.Contains(mid, "«") {
++ left, base, right := split(mid)
++ mid = strings.Repeat(left, n) + base + strings.Repeat(right, n)
++ } else {
++ mid = strings.Repeat(mid, n)
++ }
++ input := pre + mid + post
++
++ fset := token.NewFileSet()
++ _, err := ParseFile(fset, "", input, DeclarationErrors)
++ if size == "small" {
++ if err != nil {
++ t.Errorf("ParseFile(...): %v (want success)", err)
++ }
++ } else {
++ expected := "exceeded max scope depth during object resolution"
++ if err == nil || !strings.HasSuffix(err.Error(), expected) {
++ t.Errorf("ParseFile(...) = _, %v, want %q", err, expected)
++ }
++ }
++ })
++ }
++ }
++}
+--
+2.30.2
+
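The test table above exercises go/parser's new nesting limit via the internal maxNestLev constant. As a rough illustration of the same behaviour through the public API only (a sketch, not part of the patch; the exact limit and error wording are internal details of the patched toolchain):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"strings"
)

func main() {
	// Deeply nested parentheses now produce a parse error instead of
	// exhausting the goroutine stack.
	src := "package main\nvar x = " +
		strings.Repeat("(", 200000) + "1" + strings.Repeat(")", 200000)
	fset := token.NewFileSet()
	_, err := parser.ParseFile(fset, "x.go", src, 0)
	fmt.Println(err != nil) // expected: true (an "exceeded max nesting depth" error)
}
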
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-24675.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-24675.patch
new file mode 100644
index 0000000000..4bc012be21
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-24675.patch
@@ -0,0 +1,271 @@
+From 1eb931d60a24501a9668e5cb4647593e19115507 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 17 Jun 2022 12:22:53 +0530
+Subject: [PATCH] CVE-2022-24675
+
+Upstream-Status: Backport [https://go-review.googlesource.com/c/go/+/399816/]
+CVE: CVE-2022-24675
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/encoding/pem/pem.go | 174 +++++++++++++++--------------------
+ src/encoding/pem/pem_test.go | 28 +++++-
+ 2 files changed, 101 insertions(+), 101 deletions(-)
+
+diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go
+index a7272da..1bee1c1 100644
+--- a/src/encoding/pem/pem.go
++++ b/src/encoding/pem/pem.go
+@@ -87,123 +87,97 @@ func Decode(data []byte) (p *Block, rest []byte) {
+ // pemStart begins with a newline. However, at the very beginning of
+ // the byte array, we'll accept the start string without it.
+ rest = data
+- if bytes.HasPrefix(data, pemStart[1:]) {
+- rest = rest[len(pemStart)-1 : len(data)]
+- } else if i := bytes.Index(data, pemStart); i >= 0 {
+- rest = rest[i+len(pemStart) : len(data)]
+- } else {
+- return nil, data
+- }
+-
+- typeLine, rest := getLine(rest)
+- if !bytes.HasSuffix(typeLine, pemEndOfLine) {
+- return decodeError(data, rest)
+- }
+- typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)]
+-
+- p = &Block{
+- Headers: make(map[string]string),
+- Type: string(typeLine),
+- }
+-
+ for {
+- // This loop terminates because getLine's second result is
+- // always smaller than its argument.
+- if len(rest) == 0 {
++ if bytes.HasPrefix(rest, pemStart[1:]) {
++ rest = rest[len(pemStart)-1:]
++ } else if i := bytes.Index(rest, pemStart); i >= 0 {
++ rest = rest[i+len(pemStart) : len(rest)]
++ } else {
+ return nil, data
+ }
+- line, next := getLine(rest)
+
+- i := bytes.IndexByte(line, ':')
+- if i == -1 {
+- break
++ var typeLine []byte
++ typeLine, rest = getLine(rest)
++ if !bytes.HasSuffix(typeLine, pemEndOfLine) {
++ continue
+ }
++ typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)]
+
+- // TODO(agl): need to cope with values that spread across lines.
+- key, val := line[:i], line[i+1:]
+- key = bytes.TrimSpace(key)
+- val = bytes.TrimSpace(val)
+- p.Headers[string(key)] = string(val)
+- rest = next
+- }
++ p = &Block{
++ Headers: make(map[string]string),
++ Type: string(typeLine),
++ }
+
+- var endIndex, endTrailerIndex int
++ for {
++ // This loop terminates because getLine's second result is
++ // always smaller than its argument.
++ if len(rest) == 0 {
++ return nil, data
++ }
++ line, next := getLine(rest)
+
+- // If there were no headers, the END line might occur
+- // immediately, without a leading newline.
+- if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) {
+- endIndex = 0
+- endTrailerIndex = len(pemEnd) - 1
+- } else {
+- endIndex = bytes.Index(rest, pemEnd)
+- endTrailerIndex = endIndex + len(pemEnd)
+- }
++ i := bytes.IndexByte(line, ':')
++ if i == -1 {
++ break
++ }
+
+- if endIndex < 0 {
+- return decodeError(data, rest)
+- }
++ // TODO(agl): need to cope with values that spread across lines.
++ key, val := line[:i], line[i+1:]
++ key = bytes.TrimSpace(key)
++ val = bytes.TrimSpace(val)
++ p.Headers[string(key)] = string(val)
++ rest = next
++ }
+
+- // After the "-----" of the ending line, there should be the same type
+- // and then a final five dashes.
+- endTrailer := rest[endTrailerIndex:]
+- endTrailerLen := len(typeLine) + len(pemEndOfLine)
+- if len(endTrailer) < endTrailerLen {
+- return decodeError(data, rest)
+- }
++ var endIndex, endTrailerIndex int
+
+- restOfEndLine := endTrailer[endTrailerLen:]
+- endTrailer = endTrailer[:endTrailerLen]
+- if !bytes.HasPrefix(endTrailer, typeLine) ||
+- !bytes.HasSuffix(endTrailer, pemEndOfLine) {
+- return decodeError(data, rest)
+- }
++ // If there were no headers, the END line might occur
++ // immediately, without a leading newline.
++ if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) {
++ endIndex = 0
++ endTrailerIndex = len(pemEnd) - 1
++ } else {
++ endIndex = bytes.Index(rest, pemEnd)
++ endTrailerIndex = endIndex + len(pemEnd)
++ }
+
+- // The line must end with only whitespace.
+- if s, _ := getLine(restOfEndLine); len(s) != 0 {
+- return decodeError(data, rest)
+- }
++ if endIndex < 0 {
++ continue
++ }
+
+- base64Data := removeSpacesAndTabs(rest[:endIndex])
+- p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
+- n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
+- if err != nil {
+- return decodeError(data, rest)
+- }
+- p.Bytes = p.Bytes[:n]
++ // After the "-----" of the ending line, there should be the same type
++ // and then a final five dashes.
++ endTrailer := rest[endTrailerIndex:]
++ endTrailerLen := len(typeLine) + len(pemEndOfLine)
++ if len(endTrailer) < endTrailerLen {
++ continue
++ }
++
++ restOfEndLine := endTrailer[endTrailerLen:]
++ endTrailer = endTrailer[:endTrailerLen]
++ if !bytes.HasPrefix(endTrailer, typeLine) ||
++ !bytes.HasSuffix(endTrailer, pemEndOfLine) {
++ continue
++ }
+
+- // the -1 is because we might have only matched pemEnd without the
+- // leading newline if the PEM block was empty.
+- _, rest = getLine(rest[endIndex+len(pemEnd)-1:])
++ // The line must end with only whitespace.
++ if s, _ := getLine(restOfEndLine); len(s) != 0 {
++ continue
++ }
+
+- return
+-}
++ base64Data := removeSpacesAndTabs(rest[:endIndex])
++ p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data)))
++ n, err := base64.StdEncoding.Decode(p.Bytes, base64Data)
++ if err != nil {
++ continue
++ }
++ p.Bytes = p.Bytes[:n]
+
+-func decodeError(data, rest []byte) (*Block, []byte) {
+- // If we get here then we have rejected a likely looking, but
+- // ultimately invalid PEM block. We need to start over from a new
+- // position. We have consumed the preamble line and will have consumed
+- // any lines which could be header lines. However, a valid preamble
+- // line is not a valid header line, therefore we cannot have consumed
+- // the preamble line for the any subsequent block. Thus, we will always
+- // find any valid block, no matter what bytes precede it.
+- //
+- // For example, if the input is
+- //
+- // -----BEGIN MALFORMED BLOCK-----
+- // junk that may look like header lines
+- // or data lines, but no END line
+- //
+- // -----BEGIN ACTUAL BLOCK-----
+- // realdata
+- // -----END ACTUAL BLOCK-----
+- //
+- // we've failed to parse using the first BEGIN line
+- // and now will try again, using the second BEGIN line.
+- p, rest := Decode(rest)
+- if p == nil {
+- rest = data
++ // the -1 is because we might have only matched pemEnd without the
++ // leading newline if the PEM block was empty.
++ _, rest = getLine(rest[endIndex+len(pemEnd)-1:])
++ return p, rest
+ }
+- return p, rest
+ }
+
+ const pemLineLength = 64
+diff --git a/src/encoding/pem/pem_test.go b/src/encoding/pem/pem_test.go
+index 8515b46..4485581 100644
+--- a/src/encoding/pem/pem_test.go
++++ b/src/encoding/pem/pem_test.go
+@@ -107,6 +107,12 @@ const pemMissingEndingSpace = `
+ dGVzdA==
+ -----ENDBAR-----`
+
++const pemMissingEndLine = `
++-----BEGIN FOO-----
++Header: 1`
++
++var pemRepeatingBegin = strings.Repeat("-----BEGIN \n", 10)
++
+ var badPEMTests = []struct {
+ name string
+ input string
+@@ -131,14 +137,34 @@ var badPEMTests = []struct {
+ "missing ending space",
+ pemMissingEndingSpace,
+ },
++ {
++ "repeating begin",
++ pemRepeatingBegin,
++ },
++ {
++ "missing end line",
++ pemMissingEndLine,
++ },
+ }
+
+ func TestBadDecode(t *testing.T) {
+ for _, test := range badPEMTests {
+- result, _ := Decode([]byte(test.input))
++ result, rest := Decode([]byte(test.input))
+ if result != nil {
+ t.Errorf("unexpected success while parsing %q", test.name)
+ }
++ if string(rest) != test.input {
++ t.Errorf("unexpected rest: %q; want = %q", rest, test.input)
++ }
++ }
++}
++
++func TestCVE202224675(t *testing.T) {
++ // Prior to CVE-2022-24675, this input would cause a stack overflow.
++ input := []byte(strings.Repeat("-----BEGIN \n", 10000000))
++ result, rest := Decode(input)
++ if result != nil || !reflect.DeepEqual(rest, input) {
++ t.Errorf("Encode of %#v decoded as %#v", input, rest)
+ }
+ }
+
+--
+2.25.1
+
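The rewrite above turns the recursive decodeError retry into a single loop inside Decode. A minimal sketch of the input class that motivated the change, assuming a toolchain with this backport applied (not part of the patch):

package main

import (
	"bytes"
	"encoding/pem"
	"fmt"
)

func main() {
	// Repeated BEGIN lines with no matching END previously triggered one
	// level of recursion per line inside Decode; the iterative loop simply
	// rejects the input and returns it unchanged.
	input := bytes.Repeat([]byte("-----BEGIN \n"), 100000)
	block, rest := pem.Decode(input)
	fmt.Println(block == nil, len(rest) == len(input)) // expected: true true
}
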
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-24921.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-24921.patch
new file mode 100644
index 0000000000..e4270d8a75
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-24921.patch
@@ -0,0 +1,198 @@
+From ba99f699d26483ea1045f47c760e9be30799e311 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Wed, 2 Feb 2022 16:41:32 -0500
+Subject: [PATCH] regexp/syntax: reject very deeply nested regexps in Parse
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/2b65cde5868d8245ef8a0b8eba1e361440252d3b]
+CVE: CVE-2022-24921
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+The regexp code assumes it can recurse over the structure of
+a regexp safely. Go's growable stacks make that reasonable
+for all plausible regexps, but implausible ones can reach the
+“infinite recursion?” stack limit.
+
+This CL limits the depth of any parsed regexp to 1000.
+That is, the depth of the parse tree is required to be ≤ 1000.
+Regexps that require deeper parse trees will return ErrInternalError.
+A future CL will change the error to ErrInvalidDepth,
+but using ErrInternalError for now avoids introducing new API
+in point releases when this is backported.
+
+Fixes #51112.
+Fixes #51117.
+
+Change-Id: I97d2cd82195946eb43a4ea8561f5b95f91fb14c5
+Reviewed-on: https://go-review.googlesource.com/c/go/+/384616
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+Reviewed-by: Ian Lance Taylor <iant@golang.org>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/384855
+---
+ src/regexp/syntax/parse.go | 72 ++++++++++++++++++++++++++++++++-
+ src/regexp/syntax/parse_test.go | 7 ++++
+ 2 files changed, 77 insertions(+), 2 deletions(-)
+
+diff --git a/src/regexp/syntax/parse.go b/src/regexp/syntax/parse.go
+index 8c6d43a..55bd20d 100644
+--- a/src/regexp/syntax/parse.go
++++ b/src/regexp/syntax/parse.go
+@@ -76,13 +76,29 @@ const (
+ opVerticalBar
+ )
+
++// maxHeight is the maximum height of a regexp parse tree.
++// It is somewhat arbitrarily chosen, but the idea is to be large enough
++// that no one will actually hit in real use but at the same time small enough
++// that recursion on the Regexp tree will not hit the 1GB Go stack limit.
++// The maximum amount of stack for a single recursive frame is probably
++// closer to 1kB, so this could potentially be raised, but it seems unlikely
++// that people have regexps nested even this deeply.
++// We ran a test on Google's C++ code base and turned up only
++// a single use case with depth > 100; it had depth 128.
++// Using depth 1000 should be plenty of margin.
++// As an optimization, we don't even bother calculating heights
++// until we've allocated at least maxHeight Regexp structures.
++const maxHeight = 1000
++
+ type parser struct {
+ flags Flags // parse mode flags
+ stack []*Regexp // stack of parsed expressions
+ free *Regexp
+ numCap int // number of capturing groups seen
+ wholeRegexp string
+- tmpClass []rune // temporary char class work space
++ tmpClass []rune // temporary char class work space
++ numRegexp int // number of regexps allocated
++ height map[*Regexp]int // regexp height for height limit check
+ }
+
+ func (p *parser) newRegexp(op Op) *Regexp {
+@@ -92,16 +108,52 @@ func (p *parser) newRegexp(op Op) *Regexp {
+ *re = Regexp{}
+ } else {
+ re = new(Regexp)
++ p.numRegexp++
+ }
+ re.Op = op
+ return re
+ }
+
+ func (p *parser) reuse(re *Regexp) {
++ if p.height != nil {
++ delete(p.height, re)
++ }
+ re.Sub0[0] = p.free
+ p.free = re
+ }
+
++func (p *parser) checkHeight(re *Regexp) {
++ if p.numRegexp < maxHeight {
++ return
++ }
++ if p.height == nil {
++ p.height = make(map[*Regexp]int)
++ for _, re := range p.stack {
++ p.checkHeight(re)
++ }
++ }
++ if p.calcHeight(re, true) > maxHeight {
++ panic(ErrInternalError)
++ }
++}
++
++func (p *parser) calcHeight(re *Regexp, force bool) int {
++ if !force {
++ if h, ok := p.height[re]; ok {
++ return h
++ }
++ }
++ h := 1
++ for _, sub := range re.Sub {
++ hsub := p.calcHeight(sub, false)
++ if h < 1+hsub {
++ h = 1 + hsub
++ }
++ }
++ p.height[re] = h
++ return h
++}
++
+ // Parse stack manipulation.
+
+ // push pushes the regexp re onto the parse stack and returns the regexp.
+@@ -137,6 +189,7 @@ func (p *parser) push(re *Regexp) *Regexp {
+ }
+
+ p.stack = append(p.stack, re)
++ p.checkHeight(re)
+ return re
+ }
+
+@@ -252,6 +305,7 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (
+ re.Sub = re.Sub0[:1]
+ re.Sub[0] = sub
+ p.stack[n-1] = re
++ p.checkHeight(re)
+
+ if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) {
+ return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
+@@ -699,6 +753,21 @@ func literalRegexp(s string, flags Flags) *Regexp {
+ // Flags, and returns a regular expression parse tree. The syntax is
+ // described in the top-level comment.
+ func Parse(s string, flags Flags) (*Regexp, error) {
++ return parse(s, flags)
++}
++
++func parse(s string, flags Flags) (_ *Regexp, err error) {
++ defer func() {
++ switch r := recover(); r {
++ default:
++ panic(r)
++ case nil:
++ // ok
++ case ErrInternalError:
++ err = &Error{Code: ErrInternalError, Expr: s}
++ }
++ }()
++
+ if flags&Literal != 0 {
+ // Trivial parser for literal string.
+ if err := checkUTF8(s); err != nil {
+@@ -710,7 +779,6 @@ func Parse(s string, flags Flags) (*Regexp, error) {
+ // Otherwise, must do real work.
+ var (
+ p parser
+- err error
+ c rune
+ op Op
+ lastRepeat string
+diff --git a/src/regexp/syntax/parse_test.go b/src/regexp/syntax/parse_test.go
+index 5581ba1..1ef6d8a 100644
+--- a/src/regexp/syntax/parse_test.go
++++ b/src/regexp/syntax/parse_test.go
+@@ -207,6 +207,11 @@ var parseTests = []parseTest{
+ // Valid repetitions.
+ {`((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}))`, ``},
+ {`((((((((((x{1}){2}){2}){2}){2}){2}){2}){2}){2}){2})`, ``},
++
++ // Valid nesting.
++ {strings.Repeat("(", 999) + strings.Repeat(")", 999), ``},
++ {strings.Repeat("(?:", 999) + strings.Repeat(")*", 999), ``},
++ {"(" + strings.Repeat("|", 12345) + ")", ``}, // not nested at all
+ }
+
+ const testFlags = MatchNL | PerlX | UnicodeGroups
+@@ -482,6 +487,8 @@ var invalidRegexps = []string{
+ `a{100000}`,
+ `a{100000,}`,
+ "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})",
++ strings.Repeat("(", 1000) + strings.Repeat(")", 1000),
++ strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000),
+ `\Q\E*`,
+ }
+
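With this backport, regexp parsing fails cleanly on pathologically nested patterns rather than recursing without bound. A rough sketch through the public regexp API, assuming the patched toolchain (the 1000-node height limit is an internal detail, not part of the patch):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Nesting up to the limit still compiles...
	ok := strings.Repeat("(", 999) + "x" + strings.Repeat(")", 999)
	_, okErr := regexp.Compile(ok)
	// ...while anything far beyond it is rejected with an error.
	bad := strings.Repeat("(", 2000) + "x" + strings.Repeat(")", 2000)
	_, badErr := regexp.Compile(bad)
	fmt.Println(okErr == nil, badErr != nil) // expected: true true
}
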
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-27664.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-27664.patch
new file mode 100644
index 0000000000..238c3eac5b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-27664.patch
@@ -0,0 +1,68 @@
+From 48c9076dcfc2dc894842ff758c8cfae7957c9565 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 29 Sep 2022 17:06:18 +0530
+Subject: [PATCH] CVE-2022-27664
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5bc9106458fc07851ac324a4157132a91b1f3479]
+CVE: CVE-2022-27664
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/net/http/h2_bundle.go | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
+index 65d851d..83f2a72 100644
+--- a/src/net/http/h2_bundle.go
++++ b/src/net/http/h2_bundle.go
+@@ -3254,10 +3254,11 @@ var (
+ // name (key). See httpguts.ValidHeaderName for the base rules.
+ //
+ // Further, http2 says:
+-// "Just as in HTTP/1.x, header field names are strings of ASCII
+-// characters that are compared in a case-insensitive
+-// fashion. However, header field names MUST be converted to
+-// lowercase prior to their encoding in HTTP/2. "
++//
++// "Just as in HTTP/1.x, header field names are strings of ASCII
++// characters that are compared in a case-insensitive
++// fashion. However, header field names MUST be converted to
++// lowercase prior to their encoding in HTTP/2. "
+ func http2validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+@@ -3446,8 +3447,8 @@ func (s *http2sorter) SortStrings(ss []string) {
+ // validPseudoPath reports whether v is a valid :path pseudo-header
+ // value. It must be either:
+ //
+-// *) a non-empty string starting with '/'
+-// *) the string '*', for OPTIONS requests.
++// *) a non-empty string starting with '/'
++// *) the string '*', for OPTIONS requests.
+ //
+ // For now this is only used a quick check for deciding when to clean
+ // up Opaque URLs before sending requests from the Transport.
+@@ -4897,6 +4898,9 @@ func (sc *http2serverConn) startGracefulShutdownInternal() {
+ func (sc *http2serverConn) goAway(code http2ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
++ if sc.goAwayCode == http2ErrCodeNo {
++ sc.goAwayCode = code
++ }
+ return
+ }
+ sc.inGoAway = true
+@@ -6091,8 +6095,9 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
+ // prior to the headers being written. If the set of trailers is fixed
+ // or known before the header is written, the normal Go trailers mechanism
+ // is preferred:
+-// https://golang.org/pkg/net/http/#ResponseWriter
+-// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
++//
++// https://golang.org/pkg/net/http/#ResponseWriter
++// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+ const http2TrailerPrefix = "Trailer:"
+
+ // promoteUndeclaredTrailers permits http.Handlers to set trailers
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-28131.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-28131.patch
new file mode 100644
index 0000000000..8afa292144
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-28131.patch
@@ -0,0 +1,104 @@
+From 8136eb2e5c316a51d0da710fbd0504cbbefee526 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Mon, 28 Mar 2022 18:41:26 -0700
+Subject: [PATCH] encoding/xml: use iterative Skip, rather than recursive
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/58facfbe7db2fbb9afed794b281a70bdb12a60ae]
+CVE: CVE-2022-28131
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+Prevents exhausting the stack limit in _incredibly_ deeply nested
+structures.
+
+Fixes #53711
+Updates #53614
+Fixes CVE-2022-28131
+
+Change-Id: I47db4595ce10cecc29fbd06afce7b299868599e6
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1419912
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 9278cb78443d2b4deb24cbb5b61c9ba5ac688d49)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/417068
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Heschi Kreinick <heschi@google.com>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+---
+ src/encoding/xml/read.go | 15 ++++++++-------
+ src/encoding/xml/read_test.go | 18 ++++++++++++++++++
+ 2 files changed, 26 insertions(+), 7 deletions(-)
+
+diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
+index 4ffed80..3fac859 100644
+--- a/src/encoding/xml/read.go
++++ b/src/encoding/xml/read.go
+@@ -743,12 +743,12 @@ Loop:
+ }
+
+ // Skip reads tokens until it has consumed the end element
+-// matching the most recent start element already consumed.
+-// It recurs if it encounters a start element, so it can be used to
+-// skip nested structures.
++// matching the most recent start element already consumed,
++// skipping nested structures.
+ // It returns nil if it finds an end element matching the start
+ // element; otherwise it returns an error describing the problem.
+ func (d *Decoder) Skip() error {
++ var depth int64
+ for {
+ tok, err := d.Token()
+ if err != nil {
+@@ -756,11 +756,12 @@ func (d *Decoder) Skip() error {
+ }
+ switch tok.(type) {
+ case StartElement:
+- if err := d.Skip(); err != nil {
+- return err
+- }
++ depth++
+ case EndElement:
+- return nil
++ if depth == 0 {
++ return nil
++ }
++ depth--
+ }
+ }
+ }
+diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go
+index 6a20b1a..7a621a5 100644
+--- a/src/encoding/xml/read_test.go
++++ b/src/encoding/xml/read_test.go
+@@ -5,9 +5,11 @@
+ package xml
+
+ import (
++ "bytes"
+ "errors"
+ "io"
+ "reflect"
++ "runtime"
+ "strings"
+ "testing"
+ "time"
+@@ -1093,3 +1095,19 @@ func TestCVE202228131(t *testing.T) {
+ t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth)
+ }
+ }
++
++func TestCVE202230633(t *testing.T) {
++ if runtime.GOARCH == "wasm" {
++ t.Skip("causes memory exhaustion on js/wasm")
++ }
++ defer func() {
++ p := recover()
++ if p != nil {
++ t.Fatal("Unmarshal panicked")
++ }
++ }()
++ var example struct {
++ Things []string
++ }
++ Unmarshal(bytes.Repeat([]byte("<a>"), 17_000_000), &example)
++}
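The iterative Skip above tracks depth with a counter instead of calling itself per start element. A small usage sketch through the public encoding/xml API (not part of the patch):

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

func main() {
	// Skip consumes everything up to the end element matching the most
	// recently consumed start element, no matter how deeply nested.
	doc := "<root>" +
		strings.Repeat("<a>", 100000) + strings.Repeat("</a>", 100000) +
		"</root>"
	d := xml.NewDecoder(strings.NewReader(doc))
	if _, err := d.Token(); err != nil { // consume <root>
		panic(err)
	}
	fmt.Println(d.Skip()) // expected: <nil>
}
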
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-28327.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-28327.patch
new file mode 100644
index 0000000000..6361deec7d
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-28327.patch
@@ -0,0 +1,36 @@
+From 34d9ab78568d63d8097911237897b188bdaba9c2 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Thu, 31 Mar 2022 12:31:58 -0400
+Subject: [PATCH] crypto/elliptic: tolerate zero-padded scalars in generic
+ P-256
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/7139e8b024604ab168b51b99c6e8168257a5bf58]
+CVE: CVE-2022-28327
+Signed-off-by: Ralph Siemsen <ralph.siemsen@linaro.org>
+
+
+Updates #52075
+Fixes #52076
+Fixes CVE-2022-28327
+
+Change-Id: I595a7514c9a0aa1b9c76aedfc2307e1124271f27
+Reviewed-on: https://go-review.googlesource.com/c/go/+/397136
+Trust: Filippo Valsorda <filippo@golang.org>
+Reviewed-by: Julie Qiu <julie@golang.org>
+---
+ src/crypto/elliptic/p256.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/crypto/elliptic/p256.go b/src/crypto/elliptic/p256.go
+index c23e414..787e3e7 100644
+--- a/src/crypto/elliptic/p256.go
++++ b/src/crypto/elliptic/p256.go
+@@ -51,7 +51,7 @@ func p256GetScalar(out *[32]byte, in []byte) {
+ n := new(big.Int).SetBytes(in)
+ var scalarBytes []byte
+
+- if n.Cmp(p256Params.N) >= 0 {
++ if n.Cmp(p256Params.N) >= 0 || len(in) > len(out) {
+ n.Mod(n, p256Params.N)
+ scalarBytes = n.Bytes()
+ } else {
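The extra length check above makes the generic P-256 implementation reduce zero-padded scalars instead of mishandling them. A sketch of the observable behaviour, assuming the patched toolchain (platforms with an assembly P-256 take a different code path; not part of the patch):

package main

import (
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := elliptic.P256()
	k, _, _, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}
	// A leading zero byte does not change the scalar's value, so the
	// resulting point should be identical once over-long inputs are reduced.
	x1, y1 := curve.ScalarBaseMult(k)
	x2, y2 := curve.ScalarBaseMult(append([]byte{0}, k...))
	fmt.Println(x1.Cmp(x2) == 0 && y1.Cmp(y2) == 0) // expected: true
}
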
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-2879.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-2879.patch
new file mode 100644
index 0000000000..ea04a82d16
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-2879.patch
@@ -0,0 +1,111 @@
+From 9d339f1d0f53c4116a7cb4acfa895f31a07212ee Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Fri, 2 Sep 2022 20:45:18 -0700
+Subject: [PATCH] archive/tar: limit size of headers
+
+Set a 1MiB limit on special file blocks (PAX headers, GNU long names,
+GNU link names), to avoid reading arbitrarily large amounts of data
+into memory.
+
+Thanks to Adam Korczynski (ADA Logics) and OSS-Fuzz for reporting
+this issue.
+
+Fixes CVE-2022-2879
+Updates #54853
+Fixes #55926
+
+Change-Id: I85136d6ff1e0af101a112190e027987ab4335680
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1565555
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit 6ee768cef6b82adf7a90dcf367a1699ef694f3b2)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1591053
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/438498
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/0a723816cd2]
+CVE: CVE-2022-2879
+Signed-off-by: Sunil Kumar <sukumar@mvista.com>
+---
+ src/archive/tar/format.go | 4 ++++
+ src/archive/tar/reader.go | 14 ++++++++++++--
+ src/archive/tar/writer.go | 3 +++
+ 3 files changed, 19 insertions(+), 2 deletions(-)
+
+diff --git a/src/archive/tar/format.go b/src/archive/tar/format.go
+index cfe24a5..6642364 100644
+--- a/src/archive/tar/format.go
++++ b/src/archive/tar/format.go
+@@ -143,6 +143,10 @@ const (
+ blockSize = 512 // Size of each block in a tar stream
+ nameSize = 100 // Max length of the name field in USTAR format
+ prefixSize = 155 // Max length of the prefix field in USTAR format
++
++ // Max length of a special file (PAX header, GNU long name or link).
++ // This matches the limit used by libarchive.
++ maxSpecialFileSize = 1 << 20
+ )
+
+ // blockPadding computes the number of bytes needed to pad offset up to the
+diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
+index 4f9135b..e996595 100644
+--- a/src/archive/tar/reader.go
++++ b/src/archive/tar/reader.go
+@@ -104,7 +104,7 @@ func (tr *Reader) next() (*Header, error) {
+ continue // This is a meta header affecting the next header
+ case TypeGNULongName, TypeGNULongLink:
+ format.mayOnlyBe(FormatGNU)
+- realname, err := ioutil.ReadAll(tr)
++ realname, err := readSpecialFile(tr)
+ if err != nil {
+ return nil, err
+ }
+@@ -294,7 +294,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
+ // parsePAX parses PAX headers.
+ // If an extended header (type 'x') is invalid, ErrHeader is returned
+ func parsePAX(r io.Reader) (map[string]string, error) {
+- buf, err := ioutil.ReadAll(r)
++ buf, err := readSpecialFile(r)
+ if err != nil {
+ return nil, err
+ }
+@@ -827,6 +827,16 @@ func tryReadFull(r io.Reader, b []byte) (n int, err error) {
+ return n, err
+ }
+
++// readSpecialFile is like ioutil.ReadAll except it returns
++// ErrFieldTooLong if more than maxSpecialFileSize is read.
++func readSpecialFile(r io.Reader) ([]byte, error) {
++ buf, err := ioutil.ReadAll(io.LimitReader(r, maxSpecialFileSize+1))
++ if len(buf) > maxSpecialFileSize {
++ return nil, ErrFieldTooLong
++ }
++ return buf, err
++}
++
+ // discard skips n bytes in r, reporting an error if unable to do so.
+ func discard(r io.Reader, n int64) error {
+ // If possible, Seek to the last byte before the end of the data section.
+diff --git a/src/archive/tar/writer.go b/src/archive/tar/writer.go
+index e80498d..893eac0 100644
+--- a/src/archive/tar/writer.go
++++ b/src/archive/tar/writer.go
+@@ -199,6 +199,9 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
+ flag = TypeXHeader
+ }
+ data := buf.String()
++ if len(data) > maxSpecialFileSize {
++ return ErrFieldTooLong
++ }
+ if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
+ return err // Global headers return here
+ }
+--
+2.7.4
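With maxSpecialFileSize in place, oversized PAX records are rejected on both the read and write paths. A write-side sketch, assuming the patched toolchain (not part of the patch):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"strings"
)

func main() {
	// A name this long has to be carried in a PAX "path" record; once the
	// record exceeds the 1 MiB special-file limit, WriteHeader refuses it.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{
		Name: strings.Repeat("a", 2<<20),
		Mode: 0600,
	}
	fmt.Println(tw.WriteHeader(hdr)) // expected: archive/tar: header field too long
}
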
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-2880.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-2880.patch
new file mode 100644
index 0000000000..8376dc45ba
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-2880.patch
@@ -0,0 +1,164 @@
+From 753e3f8da191c2ac400407d83c70f46900769417 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 27 Oct 2022 12:22:41 +0530
+Subject: [PATCH] CVE-2022-2880
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/9d2c73a9fd69e45876509bb3bdb2af99bf77da1e]
+CVE: CVE-2022-2880
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+net/http/httputil: avoid query parameter smuggling
+
+Query parameter smuggling occurs when a proxy's interpretation
+of query parameters differs from that of a downstream server.
+Change ReverseProxy to avoid forwarding ignored query parameters.
+
+Remove unparsable query parameters from the outbound request
+
+ * if req.Form != nil after calling ReverseProxy.Director; and
+ * before calling ReverseProxy.Rewrite.
+
+This change preserves the existing behavior of forwarding the
+raw query untouched if a Director hook does not parse the query
+by calling Request.ParseForm (possibly indirectly).
+---
+ src/net/http/httputil/reverseproxy.go | 36 +++++++++++
+ src/net/http/httputil/reverseproxy_test.go | 74 ++++++++++++++++++++++
+ 2 files changed, 110 insertions(+)
+
+diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go
+index 2072a5f..c6fb873 100644
+--- a/src/net/http/httputil/reverseproxy.go
++++ b/src/net/http/httputil/reverseproxy.go
+@@ -212,6 +212,9 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ }
+
+ p.Director(outreq)
++ if outreq.Form != nil {
++ outreq.URL.RawQuery = cleanQueryParams(outreq.URL.RawQuery)
++ }
+ outreq.Close = false
+
+ reqUpType := upgradeType(outreq.Header)
+@@ -561,3 +564,36 @@ func (c switchProtocolCopier) copyToBackend(errc chan<- error) {
+ _, err := io.Copy(c.backend, c.user)
+ errc <- err
+ }
++
++func cleanQueryParams(s string) string {
++ reencode := func(s string) string {
++ v, _ := url.ParseQuery(s)
++ return v.Encode()
++ }
++ for i := 0; i < len(s); {
++ switch s[i] {
++ case ';':
++ return reencode(s)
++ case '%':
++ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
++ return reencode(s)
++ }
++ i += 3
++ default:
++ i++
++ }
++ }
++ return s
++}
++
++func ishex(c byte) bool {
++ switch {
++ case '0' <= c && c <= '9':
++ return true
++ case 'a' <= c && c <= 'f':
++ return true
++ case 'A' <= c && c <= 'F':
++ return true
++ }
++ return false
++}
+diff --git a/src/net/http/httputil/reverseproxy_test.go b/src/net/http/httputil/reverseproxy_test.go
+index 9a7223a..bc87a3b 100644
+--- a/src/net/http/httputil/reverseproxy_test.go
++++ b/src/net/http/httputil/reverseproxy_test.go
+@@ -1269,3 +1269,77 @@ func TestSingleJoinSlash(t *testing.T) {
+ }
+ }
+ }
++
++const (
++ testWantsCleanQuery = true
++ testWantsRawQuery = false
++)
++
++func TestReverseProxyQueryParameterSmugglingDirectorDoesNotParseForm(t *testing.T) {
++ testReverseProxyQueryParameterSmuggling(t, testWantsRawQuery, func(u *url.URL) *ReverseProxy {
++ proxyHandler := NewSingleHostReverseProxy(u)
++ oldDirector := proxyHandler.Director
++ proxyHandler.Director = func(r *http.Request) {
++ oldDirector(r)
++ }
++ return proxyHandler
++ })
++}
++
++func TestReverseProxyQueryParameterSmugglingDirectorParsesForm(t *testing.T) {
++ testReverseProxyQueryParameterSmuggling(t, testWantsCleanQuery, func(u *url.URL) *ReverseProxy {
++ proxyHandler := NewSingleHostReverseProxy(u)
++ oldDirector := proxyHandler.Director
++ proxyHandler.Director = func(r *http.Request) {
++ // Parsing the form causes ReverseProxy to remove unparsable
++ // query parameters before forwarding.
++ r.FormValue("a")
++ oldDirector(r)
++ }
++ return proxyHandler
++ })
++}
++
++func testReverseProxyQueryParameterSmuggling(t *testing.T, wantCleanQuery bool, newProxy func(*url.URL) *ReverseProxy) {
++ const content = "response_content"
++ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
++ w.Write([]byte(r.URL.RawQuery))
++ }))
++ defer backend.Close()
++ backendURL, err := url.Parse(backend.URL)
++ if err != nil {
++ t.Fatal(err)
++ }
++ proxyHandler := newProxy(backendURL)
++ frontend := httptest.NewServer(proxyHandler)
++ defer frontend.Close()
++
++ // Don't spam output with logs of queries containing semicolons.
++ backend.Config.ErrorLog = log.New(io.Discard, "", 0)
++ frontend.Config.ErrorLog = log.New(io.Discard, "", 0)
++
++ for _, test := range []struct {
++ rawQuery string
++ cleanQuery string
++ }{{
++ rawQuery: "a=1&a=2;b=3",
++ cleanQuery: "a=1",
++ }, {
++ rawQuery: "a=1&a=%zz&b=3",
++ cleanQuery: "a=1&b=3",
++ }} {
++ res, err := frontend.Client().Get(frontend.URL + "?" + test.rawQuery)
++ if err != nil {
++ t.Fatalf("Get: %v", err)
++ }
++ defer res.Body.Close()
++ body, _ := io.ReadAll(res.Body)
++ wantQuery := test.rawQuery
++ if wantCleanQuery {
++ wantQuery = test.cleanQuery
++ }
++ if got, want := string(body), wantQuery; got != want {
++ t.Errorf("proxy forwarded raw query %q as %q, want %q", test.rawQuery, got, want)
++ }
++ }
++}
+--
+2.25.1
+
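As the description above notes, the proxy only strips unparsable query parameters when the Director has parsed the form. A sketch of a Director that opts in, assuming the patched toolchain (not part of the patch):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Backend echoes the raw query string it received from the proxy.
	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.URL.RawQuery)
	}))
	defer backend.Close()

	target, _ := url.Parse(backend.URL)
	proxy := httputil.NewSingleHostReverseProxy(target)
	director := proxy.Director
	proxy.Director = func(r *http.Request) {
		r.FormValue("a") // parsing the form opts this request in to query cleaning
		director(r)
	}
	frontend := httptest.NewServer(proxy)
	defer frontend.Close()

	// "%zz" is not a valid escape, so the unparsable parameter is dropped.
	resp, err := frontend.Client().Get(frontend.URL + "?a=1&a=%zz&b=3")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // expected: a=1&b=3
}
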
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30629.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30629.patch
new file mode 100644
index 0000000000..47313a547f
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30629.patch
@@ -0,0 +1,47 @@
+From 8d0bbb5a6280c2cf951241ec7f6579c90d38df57 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 25 Aug 2022 10:55:08 +0530
+Subject: [PATCH] CVE-2022-30629
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c15a8e2dbb5ac376a6ed890735341b812d6b965c]
+CVE: CVE-2022-30629
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/crypto/tls/handshake_server_tls13.go | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/src/crypto/tls/handshake_server_tls13.go b/src/crypto/tls/handshake_server_tls13.go
+index 5432145..d91797e 100644
+--- a/src/crypto/tls/handshake_server_tls13.go
++++ b/src/crypto/tls/handshake_server_tls13.go
+@@ -9,6 +9,7 @@ import (
+ "crypto"
+ "crypto/hmac"
+ "crypto/rsa"
++ "encoding/binary"
+ "errors"
+ "hash"
+ "io"
+@@ -742,6 +743,19 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
+ }
+ m.lifetime = uint32(maxSessionTicketLifetime / time.Second)
+
++ // ticket_age_add is a random 32-bit value. See RFC 8446, section 4.6.1
++ // The value is not stored anywhere; we never need to check the ticket age
++ // because 0-RTT is not supported.
++ ageAdd := make([]byte, 4)
++ _, err = hs.c.config.rand().Read(ageAdd)
++ if err != nil {
++ return err
++ }
++ m.ageAdd = binary.LittleEndian.Uint32(ageAdd)
++
++ // ticket_nonce, which must be unique per connection, is always left at
++ // zero because we only ever send one ticket per connection.
++
+ if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ return err
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30631.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30631.patch
new file mode 100644
index 0000000000..5dcfd27f16
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30631.patch
@@ -0,0 +1,116 @@
+From d10fc3a84e3344f2421c1dd3046faa50709ab4d5 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 25 Aug 2022 11:01:21 +0530
+Subject: [PATCH] CVE-2022-30631
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/0117dee7dccbbd7803d88f65a2ce8bd686219ad3]
+CVE: CVE-2022-30631
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/compress/gzip/gunzip.go | 60 +++++++++++++++-----------------
+ src/compress/gzip/gunzip_test.go | 16 +++++++++
+ 2 files changed, 45 insertions(+), 31 deletions(-)
+
+diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go
+index 924bce1..237b2b9 100644
+--- a/src/compress/gzip/gunzip.go
++++ b/src/compress/gzip/gunzip.go
+@@ -248,42 +248,40 @@ func (z *Reader) Read(p []byte) (n int, err error) {
+ return 0, z.err
+ }
+
+- n, z.err = z.decompressor.Read(p)
+- z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+- z.size += uint32(n)
+- if z.err != io.EOF {
+- // In the normal case we return here.
+- return n, z.err
+- }
++ for n == 0 {
++ n, z.err = z.decompressor.Read(p)
++ z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
++ z.size += uint32(n)
++ if z.err != io.EOF {
++ // In the normal case we return here.
++ return n, z.err
++ }
+
+- // Finished file; check checksum and size.
+- if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+- z.err = noEOF(err)
+- return n, z.err
+- }
+- digest := le.Uint32(z.buf[:4])
+- size := le.Uint32(z.buf[4:8])
+- if digest != z.digest || size != z.size {
+- z.err = ErrChecksum
+- return n, z.err
+- }
+- z.digest, z.size = 0, 0
++ // Finished file; check checksum and size.
++ if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
++ z.err = noEOF(err)
++ return n, z.err
++ }
++ digest := le.Uint32(z.buf[:4])
++ size := le.Uint32(z.buf[4:8])
++ if digest != z.digest || size != z.size {
++ z.err = ErrChecksum
++ return n, z.err
++ }
++ z.digest, z.size = 0, 0
+
+- // File is ok; check if there is another.
+- if !z.multistream {
+- return n, io.EOF
+- }
+- z.err = nil // Remove io.EOF
++ // File is ok; check if there is another.
++ if !z.multistream {
++ return n, io.EOF
++ }
++ z.err = nil // Remove io.EOF
+
+- if _, z.err = z.readHeader(); z.err != nil {
+- return n, z.err
++ if _, z.err = z.readHeader(); z.err != nil {
++ return n, z.err
++ }
+ }
+
+- // Read from next file, if necessary.
+- if n > 0 {
+- return n, nil
+- }
+- return z.Read(p)
++ return n, nil
+ }
+
+ // Close closes the Reader. It does not close the underlying io.Reader.
+diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go
+index 1b01404..95220ae 100644
+--- a/src/compress/gzip/gunzip_test.go
++++ b/src/compress/gzip/gunzip_test.go
+@@ -516,3 +516,19 @@ func TestTruncatedStreams(t *testing.T) {
+ }
+ }
+ }
++
++func TestCVE202230631(t *testing.T) {
++ var empty = []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00,
++ 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
++ r := bytes.NewReader(bytes.Repeat(empty, 4e6))
++ z, err := NewReader(r)
++ if err != nil {
++ t.Fatalf("NewReader: got %v, want nil", err)
++ }
++ // Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due
++ // to stack exhaustion.
++ _, err = z.Read(make([]byte, 10))
++ if err != io.EOF {
++ t.Errorf("Reader.Read: got %v, want %v", err, io.EOF)
++ }
++}
+--
+2.25.1
+
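The loop above replaces the tail recursion in Reader.Read, so a stream of many empty gzip members is consumed iteratively. A sketch mirroring the regression test, assuming the patched toolchain (not part of the patch):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	// One empty gzip member, repeated many times. Before the fix, each empty
	// member cost one level of recursion inside Reader.Read.
	empty := []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00,
		0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
	zr, err := gzip.NewReader(bytes.NewReader(bytes.Repeat(empty, 1000000)))
	if err != nil {
		panic(err)
	}
	data, err := ioutil.ReadAll(zr)
	fmt.Println(len(data), err) // expected: 0 <nil>
}
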
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30632.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30632.patch
new file mode 100644
index 0000000000..c54ef56a0e
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30632.patch
@@ -0,0 +1,71 @@
+From 35d1dfe9746029aea9027b405c75555d41ffd2f8 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 25 Aug 2022 13:12:40 +0530
+Subject: [PATCH] CVE-2022-30632
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/76f8b7304d1f7c25834e2a0cc9e88c55276c47df]
+CVE: CVE-2022-30632
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/path/filepath/match.go | 16 +++++++++++++++-
+ src/path/filepath/match_test.go | 10 ++++++++++
+ 2 files changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go
+index 46badb5..ba68daa 100644
+--- a/src/path/filepath/match.go
++++ b/src/path/filepath/match.go
+@@ -232,6 +232,20 @@ func getEsc(chunk string) (r rune, nchunk string, err error) {
+ // The only possible returned error is ErrBadPattern, when pattern
+ // is malformed.
+ func Glob(pattern string) (matches []string, err error) {
++ return globWithLimit(pattern, 0)
++}
++
++func globWithLimit(pattern string, depth int) (matches []string, err error) {
++ // This limit is used prevent stack exhaustion issues. See CVE-2022-30632.
++ const pathSeparatorsLimit = 10000
++ if depth == pathSeparatorsLimit {
++ return nil, ErrBadPattern
++ }
++
++ // Check pattern is well-formed.
++ if _, err := Match(pattern, ""); err != nil {
++ return nil, err
++ }
+ if !hasMeta(pattern) {
+ if _, err = os.Lstat(pattern); err != nil {
+ return nil, nil
+@@ -257,7 +271,7 @@ func Glob(pattern string) (matches []string, err error) {
+ }
+
+ var m []string
+- m, err = Glob(dir)
++ m, err = globWithLimit(dir, depth+1)
+ if err != nil {
+ return
+ }
+diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go
+index b865762..c37c812 100644
+--- a/src/path/filepath/match_test.go
++++ b/src/path/filepath/match_test.go
+@@ -154,6 +154,16 @@ func TestGlob(t *testing.T) {
+ }
+ }
+
++func TestCVE202230632(t *testing.T) {
++ // Prior to CVE-2022-30632, this would cause a stack exhaustion given a
++ // large number of separators (more than 4,000,000). There is now a limit
++ // of 10,000.
++ _, err := Glob("/*" + strings.Repeat("/", 10001))
++ if err != ErrBadPattern {
++ t.Fatalf("Glob returned err=%v, want ErrBadPattern", err)
++ }
++}
++
+ func TestGlobError(t *testing.T) {
+ _, err := Glob("[]")
+ if err == nil {
+--
+2.25.1
+
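A minimal caller-side sketch of the new Glob behaviour, assuming the patched toolchain (not part of the patch):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// More than 10,000 path separators in the pattern now yields
	// ErrBadPattern instead of exhausting the stack through recursion.
	_, err := filepath.Glob("/*" + strings.Repeat("/", 10001))
	fmt.Println(err == filepath.ErrBadPattern) // expected: true
}
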
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30633.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30633.patch
new file mode 100644
index 0000000000..c16cb5f50c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30633.patch
@@ -0,0 +1,131 @@
+From ab6e2ffdcab0501bcc2de4b196c1c18ae2301d4b Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 25 Aug 2022 13:29:55 +0530
+Subject: [PATCH] CVE-2022-30633
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/2678d0c957193dceef336c969a9da74dd716a827]
+CVE: CVE-2022-30633
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/encoding/xml/read.go | 27 +++++++++++++++++++--------
+ src/encoding/xml/read_test.go | 14 ++++++++++++++
+ 2 files changed, 33 insertions(+), 8 deletions(-)
+
+diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go
+index 10a60ee..4ffed80 100644
+--- a/src/encoding/xml/read.go
++++ b/src/encoding/xml/read.go
+@@ -148,7 +148,7 @@ func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
+ if val.Kind() != reflect.Ptr {
+ return errors.New("non-pointer passed to Unmarshal")
+ }
+- return d.unmarshal(val.Elem(), start)
++ return d.unmarshal(val.Elem(), start, 0)
+ }
+
+ // An UnmarshalError represents an error in the unmarshaling process.
+@@ -304,8 +304,15 @@ var (
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ )
+
++const maxUnmarshalDepth = 10000
++
++var errExeceededMaxUnmarshalDepth = errors.New("exceeded max depth")
++
+ // Unmarshal a single XML element into val.
+-func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
++func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error {
++ if depth >= maxUnmarshalDepth {
++ return errExeceededMaxUnmarshalDepth
++ }
+ // Find start element if we need it.
+ if start == nil {
+ for {
+@@ -398,7 +405,7 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
+ v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem())))
+
+ // Recur to read element into slice.
+- if err := d.unmarshal(v.Index(n), start); err != nil {
++ if err := d.unmarshal(v.Index(n), start, depth+1); err != nil {
+ v.SetLen(n)
+ return err
+ }
+@@ -521,13 +528,15 @@ Loop:
+ case StartElement:
+ consumed := false
+ if sv.IsValid() {
+- consumed, err = d.unmarshalPath(tinfo, sv, nil, &t)
++ // unmarshalPath can call unmarshal, so we need to pass the depth through so that
++ // we can continue to enforce the maximum recusion limit.
++ consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth)
+ if err != nil {
+ return err
+ }
+ if !consumed && saveAny.IsValid() {
+ consumed = true
+- if err := d.unmarshal(saveAny, &t); err != nil {
++ if err := d.unmarshal(saveAny, &t, depth+1); err != nil {
+ return err
+ }
+ }
+@@ -672,7 +681,7 @@ func copyValue(dst reflect.Value, src []byte) (err error) {
+ // The consumed result tells whether XML elements have been consumed
+ // from the Decoder until start's matching end element, or if it's
+ // still untouched because start is uninteresting for sv's fields.
+-func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
++func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) {
+ recurse := false
+ Loop:
+ for i := range tinfo.fields {
+@@ -687,7 +696,7 @@ Loop:
+ }
+ if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
+ // It's a perfect match, unmarshal the field.
+- return true, d.unmarshal(finfo.value(sv), start)
++ return true, d.unmarshal(finfo.value(sv), start, depth+1)
+ }
+ if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
+ // It's a prefix for the field. Break and recurse
+@@ -716,7 +725,9 @@ Loop:
+ }
+ switch t := tok.(type) {
+ case StartElement:
+- consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t)
++ // the recursion depth of unmarshalPath is limited to the path length specified
++ // by the struct field tag, so we don't increment the depth here.
++ consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth)
+ if err != nil {
+ return true, err
+ }
+diff --git a/src/encoding/xml/read_test.go b/src/encoding/xml/read_test.go
+index 8c2e70f..6a20b1a 100644
+--- a/src/encoding/xml/read_test.go
++++ b/src/encoding/xml/read_test.go
+@@ -5,6 +5,7 @@
+ package xml
+
+ import (
++ "errors"
+ "io"
+ "reflect"
+ "strings"
+@@ -1079,3 +1080,16 @@ func TestUnmarshalWhitespaceAttrs(t *testing.T) {
+ t.Fatalf("whitespace attrs: Unmarshal:\nhave: %#+v\nwant: %#+v", v, want)
+ }
+ }
++
++func TestCVE202228131(t *testing.T) {
++ type nested struct {
++ Parent *nested `xml:",any"`
++ }
++ var n nested
++ err := Unmarshal(bytes.Repeat([]byte("<a>"), maxUnmarshalDepth+1), &n)
++ if err == nil {
++ t.Fatal("Unmarshal did not fail")
++ } else if !errors.Is(err, errExeceededMaxUnmarshalDepth) {
++ t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth)
++ }
++}
+--
+2.25.1
+
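A caller-side sketch of the new unmarshal depth limit, assuming the patched toolchain (the 10,000-element limit is internal; not part of the patch):

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
)

func main() {
	// A self-referential ",any" field recurses once per nested element, so
	// input nested deeper than the limit now fails instead of overflowing.
	type nested struct {
		Parent *nested `xml:",any"`
	}
	var n nested
	err := xml.Unmarshal(bytes.Repeat([]byte("<a>"), 20000), &n)
	fmt.Println(err != nil) // expected: true ("exceeded max depth")
}
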
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-30635.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-30635.patch
new file mode 100644
index 0000000000..73959f70fa
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-30635.patch
@@ -0,0 +1,120 @@
+From fdd4316737ed5681689a1f40802ffa0805e5b11c Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 26 Aug 2022 12:17:05 +0530
+Subject: [PATCH] CVE-2022-30635
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/cd54600b866db0ad068ab8df06c7f5f6cb55c9b3]
+CVE: CVE-2022-30635
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/encoding/gob/decode.go | 19 ++++++++++++-------
+ src/encoding/gob/gobencdec_test.go | 24 ++++++++++++++++++++++++
+ 2 files changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
+index d2f6c74..0e0ec75 100644
+--- a/src/encoding/gob/decode.go
++++ b/src/encoding/gob/decode.go
+@@ -871,8 +871,13 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
+ return &op
+ }
+
++var maxIgnoreNestingDepth = 10000
++
+ // decIgnoreOpFor returns the decoding op for a field that has no destination.
+-func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp {
++func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp {
++ if depth > maxIgnoreNestingDepth {
++ error_(errors.New("invalid nesting depth"))
++ }
+ // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
+ // Return the pointer to the op we're already building.
+ if opPtr := inProgress[wireId]; opPtr != nil {
+@@ -896,7 +901,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
+ errorf("bad data: undefined type %s", wireId.string())
+ case wire.ArrayT != nil:
+ elemId := wire.ArrayT.Elem
+- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
++ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
+ op = func(i *decInstr, state *decoderState, value reflect.Value) {
+ state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len)
+ }
+@@ -904,15 +909,15 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
+ case wire.MapT != nil:
+ keyId := dec.wireType[wireId].MapT.Key
+ elemId := dec.wireType[wireId].MapT.Elem
+- keyOp := dec.decIgnoreOpFor(keyId, inProgress)
+- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
++ keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1)
++ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
+ op = func(i *decInstr, state *decoderState, value reflect.Value) {
+ state.dec.ignoreMap(state, *keyOp, *elemOp)
+ }
+
+ case wire.SliceT != nil:
+ elemId := wire.SliceT.Elem
+- elemOp := dec.decIgnoreOpFor(elemId, inProgress)
++ elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
+ op = func(i *decInstr, state *decoderState, value reflect.Value) {
+ state.dec.ignoreSlice(state, *elemOp)
+ }
+@@ -1073,7 +1078,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de
+ func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine {
+ engine := new(decEngine)
+ engine.instr = make([]decInstr, 1) // one item
+- op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp))
++ op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0)
+ ovfl := overflow(dec.typeString(remoteId))
+ engine.instr[0] = decInstr{*op, 0, nil, ovfl}
+ engine.numInstr = 1
+@@ -1118,7 +1123,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn
+ localField, present := srt.FieldByName(wireField.Name)
+ // TODO(r): anonymous names
+ if !present || !isExported(wireField.Name) {
+- op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp))
++ op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0)
+ engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl}
+ continue
+ }
+diff --git a/src/encoding/gob/gobencdec_test.go b/src/encoding/gob/gobencdec_test.go
+index 6d2c8db..1b52ecc 100644
+--- a/src/encoding/gob/gobencdec_test.go
++++ b/src/encoding/gob/gobencdec_test.go
+@@ -12,6 +12,7 @@ import (
+ "fmt"
+ "io"
+ "net"
++ "reflect"
+ "strings"
+ "testing"
+ "time"
+@@ -796,3 +797,26 @@ func TestNetIP(t *testing.T) {
+ t.Errorf("decoded to %v, want 1.2.3.4", ip.String())
+ }
+ }
++
++func TestIngoreDepthLimit(t *testing.T) {
++ // We don't test the actual depth limit because it requires building an
++ // extremely large message, which takes quite a while.
++ oldNestingDepth := maxIgnoreNestingDepth
++ maxIgnoreNestingDepth = 100
++ defer func() { maxIgnoreNestingDepth = oldNestingDepth }()
++ b := new(bytes.Buffer)
++ enc := NewEncoder(b)
++ typ := reflect.TypeOf(int(0))
++ nested := reflect.ArrayOf(1, typ)
++ for i := 0; i < 100; i++ {
++ nested = reflect.ArrayOf(1, nested)
++ }
++ badStruct := reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}))
++ enc.Encode(badStruct.Interface())
++ dec := NewDecoder(b)
++ var output struct{ Hello int }
++ expectedErr := "invalid nesting depth"
++ if err := dec.Decode(&output); err == nil || err.Error() != expectedErr {
++ t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err)
++ }
++}
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-32148.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-32148.patch
new file mode 100644
index 0000000000..aab98e99fd
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-32148.patch
@@ -0,0 +1,49 @@
+From 0fe3adec199e8cd2c101933f75d8cd617de70350 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 26 Aug 2022 12:48:13 +0530
+Subject: [PATCH] CVE-2022-32148
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/ed2f33e1a7e0d18f61bd56f7ee067331d612c27e]
+CVE: CVE-2022-32148
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/net/http/header.go | 6 ++++++
+ src/net/http/header_test.go | 5 +++++
+ 2 files changed, 11 insertions(+)
+
+diff --git a/src/net/http/header.go b/src/net/http/header.go
+index b9b5391..221f613 100644
+--- a/src/net/http/header.go
++++ b/src/net/http/header.go
+@@ -100,6 +100,12 @@ func (h Header) Clone() Header {
+ sv := make([]string, nv) // shared backing array for headers' values
+ h2 := make(Header, len(h))
+ for k, vv := range h {
++ if vv == nil {
++ // Preserve nil values. ReverseProxy distinguishes
++ // between nil and zero-length header values.
++ h2[k] = nil
++ continue
++ }
+ n := copy(sv, vv)
+ h2[k] = sv[:n:n]
+ sv = sv[n:]
+diff --git a/src/net/http/header_test.go b/src/net/http/header_test.go
+index 4789362..80c0035 100644
+--- a/src/net/http/header_test.go
++++ b/src/net/http/header_test.go
+@@ -235,6 +235,11 @@ func TestCloneOrMakeHeader(t *testing.T) {
+ in: Header{"foo": {"bar"}},
+ want: Header{"foo": {"bar"}},
+ },
++ {
++ name: "nil value",
++ in: Header{"foo": nil},
++ want: Header{"foo": nil},
++ },
+ }
+
+ for _, tt := range tests {
+--
+2.25.1
+
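A small sketch of the nil-preserving Clone behaviour introduced above, assuming the patched toolchain (not part of the patch):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// ReverseProxy uses a nil value to mean "drop this header when
	// forwarding", so Clone must keep nil and empty values distinct.
	h := http.Header{"X-Forwarded-For": nil, "Accept": {"text/html"}}
	c := h.Clone()
	v, ok := c["X-Forwarded-For"]
	fmt.Println(v == nil, ok) // expected: true true
}
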
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-32189.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-32189.patch
new file mode 100644
index 0000000000..15fda7de1b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-32189.patch
@@ -0,0 +1,113 @@
+From 027e7e1578d3d7614f7586eff3894b83d9709e14 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 29 Aug 2022 10:08:34 +0530
+Subject: [PATCH] CVE-2022-32189
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/703c8ab7e5ba75c95553d4e249309297abad7102]
+CVE: CVE-2022-32189
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/math/big/floatmarsh.go | 7 +++++++
+ src/math/big/floatmarsh_test.go | 12 ++++++++++++
+ src/math/big/ratmarsh.go | 6 ++++++
+ src/math/big/ratmarsh_test.go | 12 ++++++++++++
+ 4 files changed, 37 insertions(+)
+
+diff --git a/src/math/big/floatmarsh.go b/src/math/big/floatmarsh.go
+index d1c1dab..990e085 100644
+--- a/src/math/big/floatmarsh.go
++++ b/src/math/big/floatmarsh.go
+@@ -8,6 +8,7 @@ package big
+
+ import (
+ "encoding/binary"
++ "errors"
+ "fmt"
+ )
+
+@@ -67,6 +68,9 @@ func (z *Float) GobDecode(buf []byte) error {
+ *z = Float{}
+ return nil
+ }
++ if len(buf) < 6 {
++ return errors.New("Float.GobDecode: buffer too small")
++ }
+
+ if buf[0] != floatGobVersion {
+ return fmt.Errorf("Float.GobDecode: encoding version %d not supported", buf[0])
+@@ -83,6 +87,9 @@ func (z *Float) GobDecode(buf []byte) error {
+ z.prec = binary.BigEndian.Uint32(buf[2:])
+
+ if z.form == finite {
++ if len(buf) < 10 {
++ return errors.New("Float.GobDecode: buffer too small for finite form float")
++ }
+ z.exp = int32(binary.BigEndian.Uint32(buf[6:]))
+ z.mant = z.mant.setBytes(buf[10:])
+ }
+diff --git a/src/math/big/floatmarsh_test.go b/src/math/big/floatmarsh_test.go
+index c056d78..401f45a 100644
+--- a/src/math/big/floatmarsh_test.go
++++ b/src/math/big/floatmarsh_test.go
+@@ -137,3 +137,15 @@ func TestFloatJSONEncoding(t *testing.T) {
+ }
+ }
+ }
++
++func TestFloatGobDecodeShortBuffer(t *testing.T) {
++ for _, tc := range [][]byte{
++ []byte{0x1, 0x0, 0x0, 0x0},
++ []byte{0x1, 0xfa, 0x0, 0x0, 0x0, 0x0},
++ } {
++ err := NewFloat(0).GobDecode(tc)
++ if err == nil {
++ t.Error("expected GobDecode to return error for malformed input")
++ }
++ }
++}
+diff --git a/src/math/big/ratmarsh.go b/src/math/big/ratmarsh.go
+index fbc7b60..56102e8 100644
+--- a/src/math/big/ratmarsh.go
++++ b/src/math/big/ratmarsh.go
+@@ -45,12 +45,18 @@ func (z *Rat) GobDecode(buf []byte) error {
+ *z = Rat{}
+ return nil
+ }
++ if len(buf) < 5 {
++ return errors.New("Rat.GobDecode: buffer too small")
++ }
+ b := buf[0]
+ if b>>1 != ratGobVersion {
+ return fmt.Errorf("Rat.GobDecode: encoding version %d not supported", b>>1)
+ }
+ const j = 1 + 4
+ i := j + binary.BigEndian.Uint32(buf[j-4:j])
++ if len(buf) < int(i) {
++ return errors.New("Rat.GobDecode: buffer too small")
++ }
+ z.a.neg = b&1 != 0
+ z.a.abs = z.a.abs.setBytes(buf[j:i])
+ z.b.abs = z.b.abs.setBytes(buf[i:])
+diff --git a/src/math/big/ratmarsh_test.go b/src/math/big/ratmarsh_test.go
+index 351d109..55a9878 100644
+--- a/src/math/big/ratmarsh_test.go
++++ b/src/math/big/ratmarsh_test.go
+@@ -123,3 +123,15 @@ func TestRatXMLEncoding(t *testing.T) {
+ }
+ }
+ }
++
++func TestRatGobDecodeShortBuffer(t *testing.T) {
++ for _, tc := range [][]byte{
++ []byte{0x2},
++ []byte{0x2, 0x0, 0x0, 0x0, 0xff},
++ } {
++ err := NewRat(1, 2).GobDecode(tc)
++ if err == nil {
++ t.Error("expected GobDecode to return error for malformed input")
++ }
++ }
++}
+--
+2.25.1
+
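To see what the added length checks guard against, the sketch below feeds Float.GobDecode one of the truncated buffers from the new test. With the backport it returns a "buffer too small" error; an unpatched Go 1.14 instead panics with an index-out-of-range while reading the precision field. This is only an illustration of the failure mode, not part of the patch:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // Four bytes is shorter than the six-byte header GobDecode expects.
        short := []byte{0x01, 0x00, 0x00, 0x00}
        if err := new(big.Float).GobDecode(short); err != nil {
            fmt.Println("decode rejected:", err)
        }
    }
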
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41715.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41715.patch
new file mode 100644
index 0000000000..fac0ebe94c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41715.patch
@@ -0,0 +1,271 @@
+From e9017c2416ad0ef642f5e0c2eab2dbf3cba4d997 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Wed, 28 Sep 2022 11:18:51 -0400
+Subject: [PATCH] [release-branch.go1.18] regexp: limit size of parsed regexps
+
+Set a 128 MB limit on the amount of space used by []syntax.Inst
+in the compiled form corresponding to a given regexp.
+
+Also set a 128 MB limit on the rune storage in the *syntax.Regexp
+tree itself.
+
+Thanks to Adam Korczynski (ADA Logics) and OSS-Fuzz for reporting this issue.
+
+Fixes CVE-2022-41715.
+Updates #55949.
+Fixes #55950.
+
+Change-Id: Ia656baed81564436368cf950e1c5409752f28e1b
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1592136
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/438501
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/e9017c2416ad0ef642f5e0c2eab2dbf3cba4d997]
+CVE: CVE-2022-41715
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ src/regexp/syntax/parse.go | 145 ++++++++++++++++++++++++++++++--
+ src/regexp/syntax/parse_test.go | 13 +--
+ 2 files changed, 148 insertions(+), 10 deletions(-)
+
+diff --git a/src/regexp/syntax/parse.go b/src/regexp/syntax/parse.go
+index 55bd20d..60491d5 100644
+--- a/src/regexp/syntax/parse.go
++++ b/src/regexp/syntax/parse.go
+@@ -90,15 +90,49 @@ const (
+ // until we've allocated at least maxHeight Regexp structures.
+ const maxHeight = 1000
+
++// maxSize is the maximum size of a compiled regexp in Insts.
++// It too is somewhat arbitrarily chosen, but the idea is to be large enough
++// to allow significant regexps while at the same time small enough that
++// the compiled form will not take up too much memory.
++// 128 MB is enough for a 3.3 million Inst structures, which roughly
++// corresponds to a 3.3 MB regexp.
++const (
++ maxSize = 128 << 20 / instSize
++ instSize = 5 * 8 // byte, 2 uint32, slice is 5 64-bit words
++)
++
++// maxRunes is the maximum number of runes allowed in a regexp tree
++// counting the runes in all the nodes.
++// Ignoring character classes p.numRunes is always less than the length of the regexp.
++// Character classes can make it much larger: each \pL adds 1292 runes.
++// 128 MB is enough for 32M runes, which is over 26k \pL instances.
++// Note that repetitions do not make copies of the rune slices,
++// so \pL{1000} is only one rune slice, not 1000.
++// We could keep a cache of character classes we've seen,
++// so that all the \pL we see use the same rune list,
++// but that doesn't remove the problem entirely:
++// consider something like [\pL01234][\pL01235][\pL01236]...[\pL^&*()].
++// And because the Rune slice is exposed directly in the Regexp,
++// there is not an opportunity to change the representation to allow
++// partial sharing between different character classes.
++// So the limit is the best we can do.
++const (
++ maxRunes = 128 << 20 / runeSize
++ runeSize = 4 // rune is int32
++)
++
+ type parser struct {
+ flags Flags // parse mode flags
+ stack []*Regexp // stack of parsed expressions
+ free *Regexp
+ numCap int // number of capturing groups seen
+ wholeRegexp string
+- tmpClass []rune // temporary char class work space
+- numRegexp int // number of regexps allocated
+- height map[*Regexp]int // regexp height for height limit check
++ tmpClass []rune // temporary char class work space
++ numRegexp int // number of regexps allocated
++ numRunes int // number of runes in char classes
++ repeats int64 // product of all repetitions seen
++ height map[*Regexp]int // regexp height, for height limit check
++ size map[*Regexp]int64 // regexp compiled size, for size limit check
+ }
+
+ func (p *parser) newRegexp(op Op) *Regexp {
+@@ -122,6 +156,104 @@ func (p *parser) reuse(re *Regexp) {
+ p.free = re
+ }
+
++func (p *parser) checkLimits(re *Regexp) {
++ if p.numRunes > maxRunes {
++ panic(ErrInternalError)
++ }
++ p.checkSize(re)
++ p.checkHeight(re)
++}
++
++func (p *parser) checkSize(re *Regexp) {
++ if p.size == nil {
++ // We haven't started tracking size yet.
++ // Do a relatively cheap check to see if we need to start.
++ // Maintain the product of all the repeats we've seen
++ // and don't track if the total number of regexp nodes
++ // we've seen times the repeat product is in budget.
++ if p.repeats == 0 {
++ p.repeats = 1
++ }
++ if re.Op == OpRepeat {
++ n := re.Max
++ if n == -1 {
++ n = re.Min
++ }
++ if n <= 0 {
++ n = 1
++ }
++ if int64(n) > maxSize/p.repeats {
++ p.repeats = maxSize
++ } else {
++ p.repeats *= int64(n)
++ }
++ }
++ if int64(p.numRegexp) < maxSize/p.repeats {
++ return
++ }
++
++ // We need to start tracking size.
++ // Make the map and belatedly populate it
++ // with info about everything we've constructed so far.
++ p.size = make(map[*Regexp]int64)
++ for _, re := range p.stack {
++ p.checkSize(re)
++ }
++ }
++
++ if p.calcSize(re, true) > maxSize {
++ panic(ErrInternalError)
++ }
++}
++
++func (p *parser) calcSize(re *Regexp, force bool) int64 {
++ if !force {
++ if size, ok := p.size[re]; ok {
++ return size
++ }
++ }
++
++ var size int64
++ switch re.Op {
++ case OpLiteral:
++ size = int64(len(re.Rune))
++ case OpCapture, OpStar:
++ // star can be 1+ or 2+; assume 2 pessimistically
++ size = 2 + p.calcSize(re.Sub[0], false)
++ case OpPlus, OpQuest:
++ size = 1 + p.calcSize(re.Sub[0], false)
++ case OpConcat:
++ for _, sub := range re.Sub {
++ size += p.calcSize(sub, false)
++ }
++ case OpAlternate:
++ for _, sub := range re.Sub {
++ size += p.calcSize(sub, false)
++ }
++ if len(re.Sub) > 1 {
++ size += int64(len(re.Sub)) - 1
++ }
++ case OpRepeat:
++ sub := p.calcSize(re.Sub[0], false)
++ if re.Max == -1 {
++ if re.Min == 0 {
++ size = 2 + sub // x*
++ } else {
++ size = 1 + int64(re.Min)*sub // xxx+
++ }
++ break
++ }
++ // x{2,5} = xx(x(x(x)?)?)?
++ size = int64(re.Max)*sub + int64(re.Max-re.Min)
++ }
++
++ if size < 1 {
++ size = 1
++ }
++ p.size[re] = size
++ return size
++}
++
+ func (p *parser) checkHeight(re *Regexp) {
+ if p.numRegexp < maxHeight {
+ return
+@@ -158,6 +290,7 @@ func (p *parser) calcHeight(re *Regexp, force bool) int {
+
+ // push pushes the regexp re onto the parse stack and returns the regexp.
+ func (p *parser) push(re *Regexp) *Regexp {
++ p.numRunes += len(re.Rune)
+ if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] {
+ // Single rune.
+ if p.maybeConcat(re.Rune[0], p.flags&^FoldCase) {
+@@ -189,7 +322,7 @@ func (p *parser) push(re *Regexp) *Regexp {
+ }
+
+ p.stack = append(p.stack, re)
+- p.checkHeight(re)
++ p.checkLimits(re)
+ return re
+ }
+
+@@ -305,7 +438,7 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (
+ re.Sub = re.Sub0[:1]
+ re.Sub[0] = sub
+ p.stack[n-1] = re
+- p.checkHeight(re)
++ p.checkLimits(re)
+
+ if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) {
+ return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]}
+@@ -509,6 +642,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp {
+
+ for j := start; j < i; j++ {
+ sub[j] = p.removeLeadingString(sub[j], len(str))
++ p.checkLimits(sub[j])
+ }
+ suffix := p.collapse(sub[start:i], OpAlternate) // recurse
+
+@@ -566,6 +700,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp {
+ for j := start; j < i; j++ {
+ reuse := j != start // prefix came from sub[start]
+ sub[j] = p.removeLeadingRegexp(sub[j], reuse)
++ p.checkLimits(sub[j])
+ }
+ suffix := p.collapse(sub[start:i], OpAlternate) // recurse
+
+diff --git a/src/regexp/syntax/parse_test.go b/src/regexp/syntax/parse_test.go
+index 1ef6d8a..67e3c56 100644
+--- a/src/regexp/syntax/parse_test.go
++++ b/src/regexp/syntax/parse_test.go
+@@ -484,12 +484,15 @@ var invalidRegexps = []string{
+ `(?P<>a)`,
+ `[a-Z]`,
+ `(?i)[a-Z]`,
+- `a{100000}`,
+- `a{100000,}`,
+- "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})",
+- strings.Repeat("(", 1000) + strings.Repeat(")", 1000),
+- strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000),
+ `\Q\E*`,
++ `a{100000}`, // too much repetition
++ `a{100000,}`, // too much repetition
++ "((((((((((x{2}){2}){2}){2}){2}){2}){2}){2}){2}){2})", // too much repetition
++ strings.Repeat("(", 1000) + strings.Repeat(")", 1000), // too deep
++ strings.Repeat("(?:", 1000) + strings.Repeat(")*", 1000), // too deep
++ "(" + strings.Repeat("(xx?)", 1000) + "){1000}", // too long
++ strings.Repeat("(xx?){1000}", 1000), // too long
++ strings.Repeat(`\pL`, 27000), // too many runes
+ }
+
+ var onlyPerl = []string{
+--
+2.25.1
+
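One of the new invalidRegexps entries shows the intent of the size limit: a pattern whose compiled form would exceed the 128 MB Inst budget is rejected at parse time rather than compiled. A small sketch under the assumption that this backport is applied (in this branch the rejection surfaces as an internal-error parse failure rather than a dedicated "expression too large" error):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    func main() {
        // Taken from the test data above: nested repetition that would expand
        // far beyond the 128 MB instruction budget.
        pattern := strings.Repeat("(xx?){1000}", 1000)
        _, err := regexp.Compile(pattern)
        fmt.Println(err != nil) // true once the limit is in place
    }
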
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41717.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41717.patch
new file mode 100644
index 0000000000..8bf22ee4d4
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41717.patch
@@ -0,0 +1,75 @@
+From 618120c165669c00a1606505defea6ca755cdc27 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 30 Nov 2022 16:46:33 -0500
+Subject: [PATCH] [release-branch.go1.19] net/http: update bundled
+ golang.org/x/net/http2
+
+Disable cmd/internal/moddeps test, since this update includes PRIVATE
+track fixes.
+
+For #56350.
+For #57009.
+Fixes CVE-2022-41717.
+
+Change-Id: I5c6ce546add81f361dcf0d5123fa4eaaf8f0a03b
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1663835
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/455363
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Jenny Rakoczy <jenny@golang.org>
+Reviewed-by: Michael Pratt <mpratt@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/618120c165669c00a1606505defea6ca755cdc27]
+CVE: CVE-2022-41717
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/net/http/h2_bundle.go | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
+index 83f2a72..cc03a62 100644
+--- a/src/net/http/h2_bundle.go
++++ b/src/net/http/h2_bundle.go
+@@ -4096,6 +4096,7 @@ type http2serverConn struct {
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
++ canonHeaderKeysSize int // canonHeader keys size in bytes
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+@@ -4278,6 +4279,13 @@ func (sc *http2serverConn) condlogf(err error, format string, args ...interface{
+ }
+ }
+
++// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
++// of the entries in the canonHeader cache.
++// This should be larger than the size of unique, uncommon header keys likely to
++// be sent by the peer, while not so high as to permit unreasonable memory usage
++// if the peer sends an unbounded number of unique header keys.
++const http2maxCachedCanonicalHeadersKeysSize = 2048
++
+ func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ http2buildCommonHeaderMapsOnce()
+@@ -4293,14 +4301,10 @@ func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = CanonicalHeaderKey(v)
+- // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
+- // entries in the canonHeader cache. This should be larger than the number
+- // of unique, uncommon header keys likely to be sent by the peer, while not
+- // so high as to permit unreaasonable memory usage if the peer sends an unbounded
+- // number of unique header keys.
+- const maxCachedCanonicalHeaders = 32
+- if len(sc.canonHeader) < maxCachedCanonicalHeaders {
++ size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
++ if sc.canonHeaderKeysSize+size <= http2maxCachedCanonicalHeadersKeysSize {
+ sc.canonHeader[v] = cv
++ sc.canonHeaderKeysSize += size
+ }
+ return cv
+ }
+--
+2.30.2
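The change above replaces a fixed 32-entry cap on the canonical-header cache with a byte budget, so a peer sending many unique header keys can no longer grow the map without bound. The logic lives inside net/http's generated HTTP/2 bundle and cannot be called directly; the standalone sketch below only mirrors the accounting the patch introduces. The 2048-byte budget and the 100-bytes-per-entry estimate come from the hunk above; the type and function names are illustrative:

    package main

    import "fmt"

    const maxCachedCanonicalHeadersKeysSize = 2048 // byte budget from the patch

    type canonCache struct {
        m    map[string]string
        size int // bytes charged so far
    }

    // add caches the raw->canonical mapping only while the byte budget holds,
    // mirroring how the patched canonicalHeader stops growing its map.
    func (c *canonCache) add(raw, canonical string) bool {
        entry := 100 + len(raw)*2 // map overhead + key + value, as in the patch
        if c.size+entry > maxCachedCanonicalHeadersKeysSize {
            return false
        }
        if c.m == nil {
            c.m = make(map[string]string)
        }
        c.m[raw] = canonical
        c.size += entry
        return true
    }

    func main() {
        c := &canonCache{}
        fmt.Println(c.add("x-request-id", "X-Request-Id"))
    }
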
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-1.patch
new file mode 100644
index 0000000000..f5bffd7a0b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-1.patch
@@ -0,0 +1,53 @@
+From 94e0c36694fb044e81381d112fef3692de7cdf52 Mon Sep 17 00:00:00 2001
+From: Yasuhiro Matsumoto <mattn.jp@gmail.com>
+Date: Fri, 22 Apr 2022 10:07:51 +0900
+Subject: [PATCH 1/2] path/filepath: do not remove prefix "." when following
+ path contains ":".
+
+Fixes #52476
+
+Change-Id: I9eb72ac7dbccd6322d060291f31831dc389eb9bb
+Reviewed-on: https://go-review.googlesource.com/c/go/+/401595
+Auto-Submit: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Alex Brainman <alex.brainman@gmail.com>
+Run-TryBot: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/9cd1818a7d019c02fa4898b3e45a323e35033290
+CVE: CVE-2022-41722
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/path/filepath/path.go | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go
+index 26f1833..92dc090 100644
+--- a/src/path/filepath/path.go
++++ b/src/path/filepath/path.go
+@@ -116,9 +116,21 @@ func Clean(path string) string {
+ case os.IsPathSeparator(path[r]):
+ // empty path element
+ r++
+- case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
++ case path[r] == '.' && r+1 == n:
+ // . element
+ r++
++ case path[r] == '.' && os.IsPathSeparator(path[r+1]):
++ // ./ element
++ r++
++
++ for r < len(path) && os.IsPathSeparator(path[r]) {
++ r++
++ }
++ if out.w == 0 && volumeNameLen(path[r:]) > 0 {
++ // When joining prefix "." and an absolute path on Windows,
++ // the prefix should not be removed.
++ out.append('.')
++ }
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // .. element: remove to last separator
+ r += 2
+--
+2.7.4
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-2.patch
new file mode 100644
index 0000000000..e1f7a55581
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41722-2.patch
@@ -0,0 +1,104 @@
+From b8803cb711ae163b8e67897deb6cf8c49702227c Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 12 Dec 2022 16:43:37 -0800
+Subject: [PATCH 2/2] path/filepath: do not Clean("a/../c:/b") into c:\b on
+ Windows
+
+Do not permit Clean to convert a relative path into one starting
+with a drive reference. This change causes Clean to insert a .
+path element at the start of a path when the original path does not
+start with a volume name, and the first path element would contain
+a colon.
+
+This may introduce a spurious but harmless . path element under
+some circumstances. For example, Clean("a/../b:/../c") becomes `.\c`.
+
+This reverts CL 401595, since the change here supersedes the one
+in that CL.
+
+Thanks to RyotaK (https://twitter.com/ryotkak) for reporting this issue.
+
+Updates #57274
+Fixes #57276
+Fixes CVE-2022-41722
+
+Change-Id: I837446285a03aa74c79d7642720e01f354c2ca17
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1675249
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+(cherry picked from commit 8ca37f4813ef2f64600c92b83f17c9f3ca6c03a5)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728944
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468119
+Reviewed-by: Than McIntosh <thanm@google.com>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/bdf07c2e168baf736e4c057279ca12a4d674f18c
+CVE: CVE-2022-41722
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/path/filepath/path.go | 27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go
+index 92dc090..f0f095e 100644
+--- a/src/path/filepath/path.go
++++ b/src/path/filepath/path.go
+@@ -14,6 +14,7 @@ package filepath
+ import (
+ "errors"
+ "os"
++ "runtime"
+ "sort"
+ "strings"
+ )
+@@ -116,21 +117,9 @@ func Clean(path string) string {
+ case os.IsPathSeparator(path[r]):
+ // empty path element
+ r++
+- case path[r] == '.' && r+1 == n:
++ case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+ // . element
+ r++
+- case path[r] == '.' && os.IsPathSeparator(path[r+1]):
+- // ./ element
+- r++
+-
+- for r < len(path) && os.IsPathSeparator(path[r]) {
+- r++
+- }
+- if out.w == 0 && volumeNameLen(path[r:]) > 0 {
+- // When joining prefix "." and an absolute path on Windows,
+- // the prefix should not be removed.
+- out.append('.')
+- }
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // .. element: remove to last separator
+ r += 2
+@@ -156,6 +145,18 @@ func Clean(path string) string {
+ if rooted && out.w != 1 || !rooted && out.w != 0 {
+ out.append(Separator)
+ }
++ // If a ':' appears in the path element at the start of a Windows path,
++ // insert a .\ at the beginning to avoid converting relative paths
++ // like a/../c: into c:.
++ if runtime.GOOS == "windows" && out.w == 0 && out.volLen == 0 && r != 0 {
++ for i := r; i < n && !os.IsPathSeparator(path[i]); i++ {
++ if path[i] == ':' {
++ out.append('.')
++ out.append(Separator)
++ break
++ }
++ }
++ }
+ // copy element
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ out.append(path[r])
+--
+2.7.4
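Taken together, the two halves of this backport (the second reverts and supersedes the first) stop filepath.Clean from turning a relative path into a drive path on Windows. A sketch of the behaviour difference; the output is OS-dependent, so this only illustrates the Windows case the CVE is about:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // On Windows with this backport the result is `.\c:\b`: the inserted "."
        // keeps the path relative instead of letting it become the drive path c:\b.
        // On non-Windows systems ':' is an ordinary character and this prints "c:/b".
        fmt.Println(filepath.Clean("a/../c:/b"))
    }
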
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41723.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41723.patch
new file mode 100644
index 0000000000..a93fa31dcd
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41723.patch
@@ -0,0 +1,156 @@
+From 451766789f646617157c725e20c955d4a9a70d4e Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Mon, 6 Feb 2023 10:03:44 -0800
+Subject: [PATCH] net/http: update bundled golang.org/x/net/http2
+
+Disable cmd/internal/moddeps test, since this update includes PRIVATE
+track fixes.
+
+Fixes CVE-2022-41723
+Fixes #58355
+Updates #57855
+
+Change-Id: Ie870562a6f6e44e4e8f57db6a0dde1a41a2b090c
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728939
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468118
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+Reviewed-by: Than McIntosh <thanm@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5c3e11bd0b5c0a86e5beffcd4339b86a902b21c3]
+CVE: CVE-2022-41723
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/vendor/golang.org/x/net/http2/hpack/hpack.go | 79 +++++++++++++++---------
+ 1 file changed, 49 insertions(+), 30 deletions(-)
+
+diff --git a/src/vendor/golang.org/x/net/http2/hpack/hpack.go b/src/vendor/golang.org/x/net/http2/hpack/hpack.go
+index 85f18a2..02e80e3 100644
+--- a/src/vendor/golang.org/x/net/http2/hpack/hpack.go
++++ b/src/vendor/golang.org/x/net/http2/hpack/hpack.go
+@@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+
+ var hf HeaderField
+ wantStr := d.emitEnabled || it.indexed()
++ var undecodedName undecodedString
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+@@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ }
+ hf.Name = ihf.Name
+ } else {
+- hf.Name, buf, err = d.readString(buf, wantStr)
++ undecodedName, buf, err = d.readString(buf)
+ if err != nil {
+ return err
+ }
+ }
+- hf.Value, buf, err = d.readString(buf, wantStr)
++ undecodedValue, buf, err := d.readString(buf)
+ if err != nil {
+ return err
+ }
++ if wantStr {
++ if nameIdx <= 0 {
++ hf.Name, err = d.decodeString(undecodedName)
++ if err != nil {
++ return err
++ }
++ }
++ hf.Value, err = d.decodeString(undecodedValue)
++ if err != nil {
++ return err
++ }
++ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+@@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ return 0, origP, errNeedMore
+ }
+
+-// readString decodes an hpack string from p.
++// readString reads an hpack string from p.
+ //
+-// wantStr is whether s will be used. If false, decompression and
+-// []byte->string garbage are skipped if s will be ignored
+-// anyway. This does mean that huffman decoding errors for non-indexed
+-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+-// is returning an error anyway, and because they're not indexed, the error
+-// won't affect the decoding state.
+-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
++// It returns a reference to the encoded string data to permit deferring decode costs
++// until after the caller verifies all data is present.
++func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
+ if len(p) == 0 {
+- return "", p, errNeedMore
++ return u, p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+- return "", p, err
++ return u, p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+- return "", nil, ErrStringLength
++ // Returning an error here means Huffman decoding errors
++ // for non-indexed strings past the maximum string length
++ // are ignored, but the server is returning an error anyway
++ // and because the string is not indexed the error will not
++ // affect the decoding state.
++ return u, nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+- return "", p, errNeedMore
+- }
+- if !isHuff {
+- if wantStr {
+- s = string(p[:strLen])
+- }
+- return s, p[strLen:], nil
++ return u, p, errNeedMore
+ }
++ u.isHuff = isHuff
++ u.b = p[:strLen]
++ return u, p[strLen:], nil
++}
+
+- if wantStr {
+- buf := bufPool.Get().(*bytes.Buffer)
+- buf.Reset() // don't trust others
+- defer bufPool.Put(buf)
+- if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+- buf.Reset()
+- return "", nil, err
+- }
++type undecodedString struct {
++ isHuff bool
++ b []byte
++}
++
++func (d *Decoder) decodeString(u undecodedString) (string, error) {
++ if !u.isHuff {
++ return string(u.b), nil
++ }
++ buf := bufPool.Get().(*bytes.Buffer)
++ buf.Reset() // don't trust others
++ var s string
++ err := huffmanDecode(buf, d.maxStrLen, u.b)
++ if err == nil {
+ s = buf.String()
+- buf.Reset() // be nice to GC
+ }
+- return s, p[strLen:], nil
++ buf.Reset() // be nice to GC
++ bufPool.Put(buf)
++ return s, err
+ }
+--
+2.7.4
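The hpack change above splits reading from decoding: readString now only records where the (possibly Huffman-coded) bytes sit, and decodeString pays the decompression cost only after the whole header block has been read, so a peer can no longer force Huffman work for data that will be discarded anyway. The toy sketch below imitates that two-phase shape with a one-byte length prefix instead of HPACK's varint and Huffman coding; it is illustrative only and does not use the real hpack package:

    package main

    import (
        "errors"
        "fmt"
    )

    // undecoded records where an encoded string sits, like the patch's
    // undecodedString, so decoding can be deferred until the input is validated.
    type undecoded struct {
        b []byte
    }

    func readString(p []byte) (undecoded, []byte, error) {
        if len(p) == 0 {
            return undecoded{}, p, errors.New("need more data")
        }
        n := int(p[0]) // toy one-byte length prefix
        if len(p)-1 < n {
            return undecoded{}, p, errors.New("need more data")
        }
        return undecoded{b: p[1 : 1+n]}, p[1+n:], nil
    }

    func decode(u undecoded) string {
        // The real code Huffman-decodes here when the string was Huffman-coded.
        return string(u.b)
    }

    func main() {
        u, rest, err := readString([]byte{5, 'h', 'e', 'l', 'l', 'o'})
        fmt.Println(decode(u), len(rest), err)
    }
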
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre1.patch
new file mode 100644
index 0000000000..37ebc41947
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre1.patch
@@ -0,0 +1,85 @@
+From 874b3132a84cf76da6a48978826c04c380a37a50 Mon Sep 17 00:00:00 2001
+From: avivklas <avivklas@gmail.com>
+Date: Fri, 7 Aug 2020 21:50:12 +0300
+Subject: [PATCH] mime/multipart: return overflow errors in Reader.ReadForm
+
+Updates Reader.ReadForm to check for overflow errors that may
+result from a leeway addition of 10MiB to the input argument
+maxMemory.
+
+Fixes #40430
+
+Change-Id: I510b8966c95c51d04695ba9d08fcfe005fd11a5d
+Reviewed-on: https://go-review.googlesource.com/c/go/+/247477
+Run-TryBot: Emmanuel Odeke <emm.odeke@gmail.com>
+Trust: Cuong Manh Le <cuong.manhle.vn@gmail.com>
+Trust: Emmanuel Odeke <emm.odeke@gmail.com>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Emmanuel Odeke <emm.odeke@gmail.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/874b3132a84cf76da6a48978826c04c380a37a50]
+CVE: CVE-2022-41725 #Dependency Patch1
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 4 ++++
+ src/mime/multipart/formdata_test.go | 18 ++++++++++++++++++
+ 2 files changed, 22 insertions(+)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 832d0ad693666..4eb31012941ac 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -7,6 +7,7 @@ package multipart
+ import (
+ "bytes"
+ "errors"
++ "fmt"
+ "io"
+ "io/ioutil"
+ "net/textproto"
+@@ -41,6 +42,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+
+ // Reserve an additional 10 MB for non-file parts.
+ maxValueBytes := maxMemory + int64(10<<20)
++ if maxValueBytes <= 0 {
++ return nil, fmt.Errorf("multipart: integer overflow from maxMemory(%d) + 10MiB for non-file parts", maxMemory)
++ }
+ for {
+ p, err := r.NextPart()
+ if err == io.EOF {
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 7d756c8c244a0..7112e0d3727fe 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -7,6 +7,7 @@ package multipart
+ import (
+ "bytes"
+ "io"
++ "math"
+ "os"
+ "strings"
+ "testing"
+@@ -52,6 +53,23 @@ func TestReadFormWithNamelessFile(t *testing.T) {
+ }
+ }
+
++// Issue 40430: Ensure that we report integer overflows in additions of maxMemory,
++// instead of silently and subtly failing without indication.
++func TestReadFormMaxMemoryOverflow(t *testing.T) {
++ b := strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n"))
++ r := NewReader(b, boundary)
++ f, err := r.ReadForm(math.MaxInt64)
++ if err == nil {
++ t.Fatal("Unexpected a non-nil error")
++ }
++ if f != nil {
++ t.Fatalf("Unexpected returned a non-nil form: %v\n", f)
++ }
++ if g, w := err.Error(), "integer overflow from maxMemory"; !strings.Contains(g, w) {
++ t.Errorf(`Error mismatch\n%q\ndid not contain\n%q`, g, w)
++ }
++}
++
+ func TestReadFormWithTextContentType(t *testing.T) {
+ // From https://github.com/golang/go/issues/24041
+ b := strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n"))
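The guard added above exists because the 10 MiB leeway is added to a caller-controlled int64; near MaxInt64 the sum wraps negative, which is the "silently and subtly failing" case the new test comment describes. A two-line illustration of the wraparound the check catches:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        maxMemory := int64(math.MaxInt64)
        maxValueBytes := maxMemory + int64(10<<20) // the leeway addition in readForm
        fmt.Println(maxValueBytes <= 0)            // true: the addition wrapped around
    }
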
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre2.patch
new file mode 100644
index 0000000000..b951ee893e
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre2.patch
@@ -0,0 +1,97 @@
+From 4e5a313524da62600eb59dbf98624cfe946456f8 Mon Sep 17 00:00:00 2001
+From: Emmanuel T Odeke <emmanuel@orijtech.com>
+Date: Tue, 20 Oct 2020 04:11:12 -0700
+Subject: [PATCH] net/http: test that ParseMultipartForm catches overflows
+
+Tests that if the combination of:
+* HTTP multipart file payload size
+* ParseMultipartForm's maxMemory parameter
+* the internal leeway buffer size of 10MiB
+
+overflows, then we'll report an overflow instead of silently
+passing.
+
+Reapplies and fixes CL 254977, which was reverted in CL 263658.
+
+The prior test lacked a res.Body.Close(), so fixed that and
+added a leaked Transport check to verify correctness.
+
+Updates 40430.
+
+Change-Id: I3c0f7ef43d621f6eb00f07755f04f9f36c51f98f
+Reviewed-on: https://go-review.googlesource.com/c/go/+/263817
+Run-TryBot: Emmanuel Odeke <emm.odeke@gmail.com>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Bryan C. Mills <bcmills@google.com>
+Trust: Damien Neil <dneil@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/4e5a313524da62600eb59dbf98624cfe946456f8]
+CVE: CVE-2022-41725 #Dependency Patch2
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/net/http/request_test.go | 45 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 45 insertions(+)
+
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index b4ef472e71229..19526b9ad791a 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -13,6 +13,7 @@ import (
+ "fmt"
+ "io"
+ "io/ioutil"
++ "math"
+ "mime/multipart"
+ . "net/http"
+ "net/http/httptest"
+@@ -245,6 +246,50 @@ func TestParseMultipartForm(t *testing.T) {
+ }
+ }
+
++// Issue #40430: Test that if maxMemory for ParseMultipartForm when combined with
++// the payload size and the internal leeway buffer size of 10MiB overflows, that we
++// correctly return an error.
++func TestMaxInt64ForMultipartFormMaxMemoryOverflow(t *testing.T) {
++ defer afterTest(t)
++
++ payloadSize := 1 << 10
++ cst := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
++ // The combination of:
++ // MaxInt64 + payloadSize + (internal spare of 10MiB)
++ // triggers the overflow. See issue https://golang.org/issue/40430/
++ if err := req.ParseMultipartForm(math.MaxInt64); err != nil {
++ Error(rw, err.Error(), StatusBadRequest)
++ return
++ }
++ }))
++ defer cst.Close()
++ fBuf := new(bytes.Buffer)
++ mw := multipart.NewWriter(fBuf)
++ mf, err := mw.CreateFormFile("file", "myfile.txt")
++ if err != nil {
++ t.Fatal(err)
++ }
++ if _, err := mf.Write(bytes.Repeat([]byte("abc"), payloadSize)); err != nil {
++ t.Fatal(err)
++ }
++ if err := mw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ req, err := NewRequest("POST", cst.URL, fBuf)
++ if err != nil {
++ t.Fatal(err)
++ }
++ req.Header.Set("Content-Type", mw.FormDataContentType())
++ res, err := cst.Client().Do(req)
++ if err != nil {
++ t.Fatal(err)
++ }
++ res.Body.Close()
++ if g, w := res.StatusCode, StatusBadRequest; g != w {
++ t.Fatalf("Status code mismatch: got %d, want %d", g, w)
++ }
++}
++
+ func TestRedirect_h1(t *testing.T) { testRedirect(t, h1Mode) }
+ func TestRedirect_h2(t *testing.T) { testRedirect(t, h2Mode) }
+ func testRedirect(t *testing.T, h2 bool) {
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre3.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre3.patch
new file mode 100644
index 0000000000..767225b888
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725-pre3.patch
@@ -0,0 +1,98 @@
+From 5246fa5e75b129a7dbd9722aa4de0cbaf7ceae43 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Thu, 3 Dec 2020 09:45:07 -0500
+Subject: [PATCH] mime/multipart: handle ReadForm(math.MaxInt64) better
+
+Returning an error about integer overflow is needlessly pedantic.
+The meaning of ReadForm(MaxInt64) is easily understood
+(accept a lot of data) and can be implemented.
+
+Fixes #40430.
+
+Change-Id: I8a522033dd9a2f9ad31dd2ad82cf08d553736ab9
+Reviewed-on: https://go-review.googlesource.com/c/go/+/275112
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Ian Lance Taylor <iant@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5246fa5e75b129a7dbd9722aa4de0cbaf7ceae43]
+CVE: CVE-2022-41725 #Dependency Patch3
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 8 ++++++--
+ src/mime/multipart/formdata_test.go | 14 +++++---------
+ src/net/http/request_test.go | 2 +-
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 4eb31012941ac..9c42ea8c023b5 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -7,9 +7,9 @@ package multipart
+ import (
+ "bytes"
+ "errors"
+- "fmt"
+ "io"
+ "io/ioutil"
++ "math"
+ "net/textproto"
+ "os"
+ )
+@@ -43,7 +43,11 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ // Reserve an additional 10 MB for non-file parts.
+ maxValueBytes := maxMemory + int64(10<<20)
+ if maxValueBytes <= 0 {
+- return nil, fmt.Errorf("multipart: integer overflow from maxMemory(%d) + 10MiB for non-file parts", maxMemory)
++ if maxMemory < 0 {
++ maxValueBytes = 0
++ } else {
++ maxValueBytes = math.MaxInt64
++ }
+ }
+ for {
+ p, err := r.NextPart()
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 7112e0d3727fe..e3a3a3eae8e15 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -53,20 +53,16 @@ func TestReadFormWithNamelessFile(t *testing.T) {
+ }
+ }
+
+-// Issue 40430: Ensure that we report integer overflows in additions of maxMemory,
+-// instead of silently and subtly failing without indication.
++// Issue 40430: Handle ReadForm(math.MaxInt64)
+ func TestReadFormMaxMemoryOverflow(t *testing.T) {
+ b := strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n"))
+ r := NewReader(b, boundary)
+ f, err := r.ReadForm(math.MaxInt64)
+- if err == nil {
+- t.Fatal("Unexpected a non-nil error")
+- }
+- if f != nil {
+- t.Fatalf("Unexpected returned a non-nil form: %v\n", f)
++ if err != nil {
++ t.Fatalf("ReadForm(MaxInt64): %v", err)
+ }
+- if g, w := err.Error(), "integer overflow from maxMemory"; !strings.Contains(g, w) {
+- t.Errorf(`Error mismatch\n%q\ndid not contain\n%q`, g, w)
++ if f == nil {
++ t.Fatal("ReadForm(MaxInt64): missing form")
+ }
+ }
+
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index 19526b9ad791a..689498e19d5dd 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -285,7 +285,7 @@ func TestMaxInt64ForMultipartFormMaxMemoryOverflow(t *testing.T) {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+- if g, w := res.StatusCode, StatusBadRequest; g != w {
++ if g, w := res.StatusCode, StatusOK; g != w {
+ t.Fatalf("Status code mismatch: got %d, want %d", g, w)
+ }
+ }
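After this adjustment the overflow is absorbed instead of reported: ReadForm(math.MaxInt64) is treated as "no practical memory limit" and succeeds. A minimal usage sketch under that assumption:

    package main

    import (
        "bytes"
        "fmt"
        "math"
        "mime/multipart"
    )

    func main() {
        var buf bytes.Buffer
        w := multipart.NewWriter(&buf)
        fw, _ := w.CreateFormField("k")
        fw.Write([]byte("v"))
        w.Close()

        // With this patch MaxInt64 no longer trips the overflow error added by
        // the first dependency patch; the form is simply kept in memory.
        form, err := multipart.NewReader(&buf, w.Boundary()).ReadForm(math.MaxInt64)
        if err != nil {
            panic(err)
        }
        fmt.Println(form.Value["k"])
    }
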
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2022-41725.patch b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725.patch
new file mode 100644
index 0000000000..5f80c62b0b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2022-41725.patch
@@ -0,0 +1,660 @@
+From 5c55ac9bf1e5f779220294c843526536605f42ab Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 25 Jan 2023 09:27:01 -0800
+Subject: [PATCH] [release-branch.go1.19] mime/multipart: limit memory/inode consumption of ReadForm
+
+Reader.ReadForm is documented as storing "up to maxMemory bytes + 10MB"
+in memory. Parsed forms can consume substantially more memory than
+this limit, since ReadForm does not account for map entry overhead
+and MIME headers.
+
+In addition, while the amount of disk memory consumed by ReadForm can
+be constrained by limiting the size of the parsed input, ReadForm will
+create one temporary file per form part stored on disk, potentially
+consuming a large number of inodes.
+
+Update ReadForm's memory accounting to include part names,
+MIME headers, and map entry overhead.
+
+Update ReadForm to store all on-disk file parts in a single
+temporary file.
+
+Files returned by FileHeader.Open are documented as having a concrete
+type of *os.File when a file is stored on disk. The change to use a
+single temporary file for all parts means that this is no longer the
+case when a form contains more than a single file part stored on disk.
+
+The previous behavior of storing each file part in a separate disk
+file may be reenabled with GODEBUG=multipartfiles=distinct.
+
+Update Reader.NextPart and Reader.NextRawPart to set a 10MiB cap
+on the size of MIME headers.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+Updates #58006
+Fixes #58362
+Fixes CVE-2022-41725
+
+Change-Id: Ibd780a6c4c83ac8bcfd3cbe344f042e9940f2eab
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1714276
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit ed4664330edcd91b24914c9371c377c132dbce8c)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1728949
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/468116
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Than McIntosh <thanm@google.com>
+Run-TryBot: Michael Pratt <mpratt@google.com>
+Auto-Submit: Michael Pratt <mpratt@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5c55ac9bf1e5f779220294c843526536605f42ab]
+CVE: CVE-2022-41725
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 132 ++++++++++++++++++++-----
+ src/mime/multipart/formdata_test.go | 140 ++++++++++++++++++++++++++-
+ src/mime/multipart/multipart.go | 25 +++--
+ src/mime/multipart/readmimeheader.go | 14 +++
+ src/net/http/request_test.go | 2 +-
+ src/net/textproto/reader.go | 27 ++++++
+ 6 files changed, 303 insertions(+), 37 deletions(-)
+ create mode 100644 src/mime/multipart/readmimeheader.go
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 9c42ea8..1eeb340 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -7,6 +7,7 @@ package multipart
+ import (
+ "bytes"
+ "errors"
++ "internal/godebug"
+ "io"
+ "io/ioutil"
+ "math"
+@@ -34,23 +35,58 @@ func (r *Reader) ReadForm(maxMemory int64) (*Form, error) {
+
+ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ form := &Form{make(map[string][]string), make(map[string][]*FileHeader)}
++ var (
++ file *os.File
++ fileOff int64
++ )
++ numDiskFiles := 0
++ multipartFiles := godebug.Get("multipartfiles")
++ combineFiles := multipartFiles != "distinct"
+ defer func() {
++ if file != nil {
++ if cerr := file.Close(); err == nil {
++ err = cerr
++ }
++ }
++ if combineFiles && numDiskFiles > 1 {
++ for _, fhs := range form.File {
++ for _, fh := range fhs {
++ fh.tmpshared = true
++ }
++ }
++ }
+ if err != nil {
+ form.RemoveAll()
++ if file != nil {
++ os.Remove(file.Name())
++ }
+ }
+ }()
+
+- // Reserve an additional 10 MB for non-file parts.
+- maxValueBytes := maxMemory + int64(10<<20)
+- if maxValueBytes <= 0 {
++ // maxFileMemoryBytes is the maximum bytes of file data we will store in memory.
++ // Data past this limit is written to disk.
++ // This limit strictly applies to content, not metadata (filenames, MIME headers, etc.),
++ // since metadata is always stored in memory, not disk.
++ //
++ // maxMemoryBytes is the maximum bytes we will store in memory, including file content,
++ // non-file part values, metdata, and map entry overhead.
++ //
++ // We reserve an additional 10 MB in maxMemoryBytes for non-file data.
++ //
++ // The relationship between these parameters, as well as the overly-large and
++ // unconfigurable 10 MB added on to maxMemory, is unfortunate but difficult to change
++ // within the constraints of the API as documented.
++ maxFileMemoryBytes := maxMemory
++ maxMemoryBytes := maxMemory + int64(10<<20)
++ if maxMemoryBytes <= 0 {
+ if maxMemory < 0 {
+- maxValueBytes = 0
++ maxMemoryBytes = 0
+ } else {
+- maxValueBytes = math.MaxInt64
++ maxMemoryBytes = math.MaxInt64
+ }
+ }
+ for {
+- p, err := r.NextPart()
++ p, err := r.nextPart(false, maxMemoryBytes)
+ if err == io.EOF {
+ break
+ }
+@@ -64,16 +100,27 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+ filename := p.FileName()
+
++ // Multiple values for the same key (one map entry, longer slice) are cheaper
++ // than the same number of values for different keys (many map entries), but
++ // using a consistent per-value cost for overhead is simpler.
++ maxMemoryBytes -= int64(len(name))
++ maxMemoryBytes -= 100 // map overhead
++ if maxMemoryBytes < 0 {
++ // We can't actually take this path, since nextPart would already have
++ // rejected the MIME headers for being too large. Check anyway.
++ return nil, ErrMessageTooLarge
++ }
++
+ var b bytes.Buffer
+
+ if filename == "" {
+ // value, store as string in memory
+- n, err := io.CopyN(&b, p, maxValueBytes+1)
++ n, err := io.CopyN(&b, p, maxMemoryBytes+1)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+- maxValueBytes -= n
+- if maxValueBytes < 0 {
++ maxMemoryBytes -= n
++ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
+ form.Value[name] = append(form.Value[name], b.String())
+@@ -81,35 +128,45 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+
+ // file, store in memory or on disk
++ maxMemoryBytes -= mimeHeaderSize(p.Header)
++ if maxMemoryBytes < 0 {
++ return nil, ErrMessageTooLarge
++ }
+ fh := &FileHeader{
+ Filename: filename,
+ Header: p.Header,
+ }
+- n, err := io.CopyN(&b, p, maxMemory+1)
++ n, err := io.CopyN(&b, p, maxFileMemoryBytes+1)
+ if err != nil && err != io.EOF {
+ return nil, err
+ }
+- if n > maxMemory {
+- // too big, write to disk and flush buffer
+- file, err := ioutil.TempFile("", "multipart-")
+- if err != nil {
+- return nil, err
++ if n > maxFileMemoryBytes {
++ if file == nil {
++ file, err = ioutil.TempFile(r.tempDir, "multipart-")
++ if err != nil {
++ return nil, err
++ }
+ }
++ numDiskFiles++
+ size, err := io.Copy(file, io.MultiReader(&b, p))
+- if cerr := file.Close(); err == nil {
+- err = cerr
+- }
+ if err != nil {
+- os.Remove(file.Name())
+ return nil, err
+ }
+ fh.tmpfile = file.Name()
+ fh.Size = size
++ fh.tmpoff = fileOff
++ fileOff += size
++ if !combineFiles {
++ if err := file.Close(); err != nil {
++ return nil, err
++ }
++ file = nil
++ }
+ } else {
+ fh.content = b.Bytes()
+ fh.Size = int64(len(fh.content))
+- maxMemory -= n
+- maxValueBytes -= n
++ maxFileMemoryBytes -= n
++ maxMemoryBytes -= n
+ }
+ form.File[name] = append(form.File[name], fh)
+ }
+@@ -117,6 +174,17 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ return form, nil
+ }
+
++func mimeHeaderSize(h textproto.MIMEHeader) (size int64) {
++ for k, vs := range h {
++ size += int64(len(k))
++ size += 100 // map entry overhead
++ for _, v := range vs {
++ size += int64(len(v))
++ }
++ }
++ return size
++}
++
+ // Form is a parsed multipart form.
+ // Its File parts are stored either in memory or on disk,
+ // and are accessible via the *FileHeader's Open method.
+@@ -134,7 +202,7 @@ func (f *Form) RemoveAll() error {
+ for _, fh := range fhs {
+ if fh.tmpfile != "" {
+ e := os.Remove(fh.tmpfile)
+- if e != nil && err == nil {
++ if e != nil && !errors.Is(e, os.ErrNotExist) && err == nil {
+ err = e
+ }
+ }
+@@ -149,15 +217,25 @@ type FileHeader struct {
+ Header textproto.MIMEHeader
+ Size int64
+
+- content []byte
+- tmpfile string
++ content []byte
++ tmpfile string
++ tmpoff int64
++ tmpshared bool
+ }
+
+ // Open opens and returns the FileHeader's associated File.
+ func (fh *FileHeader) Open() (File, error) {
+ if b := fh.content; b != nil {
+ r := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
+- return sectionReadCloser{r}, nil
++ return sectionReadCloser{r, nil}, nil
++ }
++ if fh.tmpshared {
++ f, err := os.Open(fh.tmpfile)
++ if err != nil {
++ return nil, err
++ }
++ r := io.NewSectionReader(f, fh.tmpoff, fh.Size)
++ return sectionReadCloser{r, f}, nil
+ }
+ return os.Open(fh.tmpfile)
+ }
+@@ -176,8 +254,12 @@ type File interface {
+
+ type sectionReadCloser struct {
+ *io.SectionReader
++ io.Closer
+ }
+
+ func (rc sectionReadCloser) Close() error {
++ if rc.Closer != nil {
++ return rc.Closer.Close()
++ }
+ return nil
+ }
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index e3a3a3e..5cded71 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -6,8 +6,10 @@ package multipart
+
+ import (
+ "bytes"
++ "fmt"
+ "io"
+ "math"
++ "net/textproto"
+ "os"
+ "strings"
+ "testing"
+@@ -208,8 +210,8 @@ Content-Disposition: form-data; name="largetext"
+ maxMemory int64
+ err error
+ }{
+- {"smaller", 50, nil},
+- {"exact-fit", 25, nil},
++ {"smaller", 50 + int64(len("largetext")) + 100, nil},
++ {"exact-fit", 25 + int64(len("largetext")) + 100, nil},
+ {"too-large", 0, ErrMessageTooLarge},
+ }
+ for _, tc := range testCases {
+@@ -224,7 +226,7 @@ Content-Disposition: form-data; name="largetext"
+ defer f.RemoveAll()
+ }
+ if tc.err != err {
+- t.Fatalf("ReadForm error - got: %v; expected: %v", tc.err, err)
++ t.Fatalf("ReadForm error - got: %v; expected: %v", err, tc.err)
+ }
+ if err == nil {
+ if g := f.Value["largetext"][0]; g != largeTextValue {
+@@ -234,3 +236,135 @@ Content-Disposition: form-data; name="largetext"
+ })
+ }
+ }
++
++// TestReadForm_MetadataTooLarge verifies that we account for the size of field names,
++// MIME headers, and map entry overhead while limiting the memory consumption of parsed forms.
++func TestReadForm_MetadataTooLarge(t *testing.T) {
++ for _, test := range []struct {
++ name string
++ f func(*Writer)
++ }{{
++ name: "large name",
++ f: func(fw *Writer) {
++ name := strings.Repeat("a", 10<<20)
++ w, _ := fw.CreateFormField(name)
++ w.Write([]byte("value"))
++ },
++ }, {
++ name: "large MIME header",
++ f: func(fw *Writer) {
++ h := make(textproto.MIMEHeader)
++ h.Set("Content-Disposition", `form-data; name="a"`)
++ h.Set("X-Foo", strings.Repeat("a", 10<<20))
++ w, _ := fw.CreatePart(h)
++ w.Write([]byte("value"))
++ },
++ }, {
++ name: "many parts",
++ f: func(fw *Writer) {
++ for i := 0; i < 110000; i++ {
++ w, _ := fw.CreateFormField("f")
++ w.Write([]byte("v"))
++ }
++ },
++ }} {
++ t.Run(test.name, func(t *testing.T) {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ test.f(fw)
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(&buf, fw.Boundary())
++ _, err := fr.ReadForm(0)
++ if err != ErrMessageTooLarge {
++ t.Errorf("fr.ReadForm() = %v, want ErrMessageTooLarge", err)
++ }
++ })
++ }
++}
++
++// TestReadForm_ManyFiles_Combined tests that a multipart form containing many files only
++// results in a single on-disk file.
++func TestReadForm_ManyFiles_Combined(t *testing.T) {
++ const distinct = false
++ testReadFormManyFiles(t, distinct)
++}
++
++// TestReadForm_ManyFiles_Distinct tests that setting GODEBUG=multipartfiles=distinct
++// results in every file in a multipart form being placed in a distinct on-disk file.
++func TestReadForm_ManyFiles_Distinct(t *testing.T) {
++ t.Setenv("GODEBUG", "multipartfiles=distinct")
++ const distinct = true
++ testReadFormManyFiles(t, distinct)
++}
++
++func testReadFormManyFiles(t *testing.T, distinct bool) {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ const numFiles = 10
++ for i := 0; i < numFiles; i++ {
++ name := fmt.Sprint(i)
++ w, err := fw.CreateFormFile(name, name)
++ if err != nil {
++ t.Fatal(err)
++ }
++ w.Write([]byte(name))
++ }
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(&buf, fw.Boundary())
++ fr.tempDir = t.TempDir()
++ form, err := fr.ReadForm(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ for i := 0; i < numFiles; i++ {
++ name := fmt.Sprint(i)
++ if got := len(form.File[name]); got != 1 {
++ t.Fatalf("form.File[%q] has %v entries, want 1", name, got)
++ }
++ fh := form.File[name][0]
++ file, err := fh.Open()
++ if err != nil {
++ t.Fatalf("form.File[%q].Open() = %v", name, err)
++ }
++ if distinct {
++ if _, ok := file.(*os.File); !ok {
++ t.Fatalf("form.File[%q].Open: %T, want *os.File", name, file)
++ }
++ }
++ got, err := io.ReadAll(file)
++ file.Close()
++ if string(got) != name || err != nil {
++ t.Fatalf("read form.File[%q]: %q, %v; want %q, nil", name, string(got), err, name)
++ }
++ }
++ dir, err := os.Open(fr.tempDir)
++ if err != nil {
++ t.Fatal(err)
++ }
++ defer dir.Close()
++ names, err := dir.Readdirnames(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ wantNames := 1
++ if distinct {
++ wantNames = numFiles
++ }
++ if len(names) != wantNames {
++ t.Fatalf("temp dir contains %v files; want 1", len(names))
++ }
++ if err := form.RemoveAll(); err != nil {
++ t.Fatalf("form.RemoveAll() = %v", err)
++ }
++ names, err = dir.Readdirnames(0)
++ if err != nil {
++ t.Fatal(err)
++ }
++ if len(names) != 0 {
++ t.Fatalf("temp dir contains %v files; want 0", len(names))
++ }
++}
+diff --git a/src/mime/multipart/multipart.go b/src/mime/multipart/multipart.go
+index 1750300..958cef8 100644
+--- a/src/mime/multipart/multipart.go
++++ b/src/mime/multipart/multipart.go
+@@ -121,12 +121,12 @@ func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
+ return n, r.err
+ }
+
+-func newPart(mr *Reader, rawPart bool) (*Part, error) {
++func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ bp := &Part{
+ Header: make(map[string][]string),
+ mr: mr,
+ }
+- if err := bp.populateHeaders(); err != nil {
++ if err := bp.populateHeaders(maxMIMEHeaderSize); err != nil {
+ return nil, err
+ }
+ bp.r = partReader{bp}
+@@ -142,12 +142,16 @@ func newPart(mr *Reader, rawPart bool) (*Part, error) {
+ return bp, nil
+ }
+
+-func (bp *Part) populateHeaders() error {
++func (bp *Part) populateHeaders(maxMIMEHeaderSize int64) error {
+ r := textproto.NewReader(bp.mr.bufReader)
+- header, err := r.ReadMIMEHeader()
++ header, err := readMIMEHeader(r, maxMIMEHeaderSize)
+ if err == nil {
+ bp.Header = header
+ }
++ // TODO: Add a distinguishable error to net/textproto.
++ if err != nil && err.Error() == "message too large" {
++ err = ErrMessageTooLarge
++ }
+ return err
+ }
+
+@@ -287,6 +291,7 @@ func (p *Part) Close() error {
+ // isn't supported.
+ type Reader struct {
+ bufReader *bufio.Reader
++ tempDir string // used in tests
+
+ currentPart *Part
+ partsRead int
+@@ -297,6 +302,10 @@ type Reader struct {
+ dashBoundary []byte // "--boundary"
+ }
+
++// maxMIMEHeaderSize is the maximum size of a MIME header we will parse,
++// including header keys, values, and map overhead.
++const maxMIMEHeaderSize = 10 << 20
++
+ // NextPart returns the next part in the multipart or an error.
+ // When there are no more parts, the error io.EOF is returned.
+ //
+@@ -304,7 +313,7 @@ type Reader struct {
+ // has a value of "quoted-printable", that header is instead
+ // hidden and the body is transparently decoded during Read calls.
+ func (r *Reader) NextPart() (*Part, error) {
+- return r.nextPart(false)
++ return r.nextPart(false, maxMIMEHeaderSize)
+ }
+
+ // NextRawPart returns the next part in the multipart or an error.
+@@ -313,10 +322,10 @@ func (r *Reader) NextPart() (*Part, error) {
+ // Unlike NextPart, it does not have special handling for
+ // "Content-Transfer-Encoding: quoted-printable".
+ func (r *Reader) NextRawPart() (*Part, error) {
+- return r.nextPart(true)
++ return r.nextPart(true, maxMIMEHeaderSize)
+ }
+
+-func (r *Reader) nextPart(rawPart bool) (*Part, error) {
++func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ if r.currentPart != nil {
+ r.currentPart.Close()
+ }
+@@ -341,7 +350,7 @@ func (r *Reader) nextPart(rawPart bool) (*Part, error) {
+
+ if r.isBoundaryDelimiterLine(line) {
+ r.partsRead++
+- bp, err := newPart(r, rawPart)
++ bp, err := newPart(r, rawPart, maxMIMEHeaderSize)
+ if err != nil {
+ return nil, err
+ }
+diff --git a/src/mime/multipart/readmimeheader.go b/src/mime/multipart/readmimeheader.go
+new file mode 100644
+index 0000000..6836928
+--- /dev/null
++++ b/src/mime/multipart/readmimeheader.go
+@@ -0,0 +1,14 @@
++// Copyright 2023 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++package multipart
++
++import (
++ "net/textproto"
++ _ "unsafe" // for go:linkname
++)
++
++// readMIMEHeader is defined in package net/textproto.
++//
++//go:linkname readMIMEHeader net/textproto.readMIMEHeader
++func readMIMEHeader(r *textproto.Reader, lim int64) (textproto.MIMEHeader, error)
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index 94133ee..170d3f5 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -962,7 +962,7 @@ func testMissingFile(t *testing.T, req *Request) {
+ t.Errorf("FormFile file = %v, want nil", f)
+ }
+ if fh != nil {
+- t.Errorf("FormFile file header = %q, want nil", fh)
++ t.Errorf("FormFile file header = %v, want nil", fh)
+ }
+ if err != ErrMissingFile {
+ t.Errorf("FormFile err = %q, want ErrMissingFile", err)
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index f63f5ec..96553fb 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -7,9 +7,11 @@ package textproto
+ import (
+ "bufio"
+ "bytes"
++ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
++ "math"
+ "strconv"
+ "strings"
+ "sync"
+@@ -482,6 +484,12 @@ func (r *Reader) ReadDotLines() ([]string, error) {
+ // }
+ //
+ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
++ return readMIMEHeader(r, math.MaxInt64)
++}
++
++// readMIMEHeader is a version of ReadMIMEHeader which takes a limit on the header size.
++// It is called by the mime/multipart package.
++func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ // Avoid lots of small slice allocations later by allocating one
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+@@ -525,6 +533,15 @@ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+ continue
+ }
+
++ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
++ //
++ // value is computed as
++ // value := string(bytes.TrimLeft(v, " \t"))
++ //
++ // in the original patch from 1.19. This relies on
++ // 'v' which does not exist in 1.14. We leave the
++ // 1.14 method unchanged.
++
+ // Skip initial spaces in value.
+ i++ // skip colon
+ for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') {
+@@ -533,6 +550,16 @@ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+ value := string(kv[i:])
+
+ vv := m[key]
++ if vv == nil {
++ lim -= int64(len(key))
++ lim -= 100 // map entry overhead
++ }
++ lim -= int64(len(value))
++ if lim < 0 {
++ // TODO: This should be a distinguishable error (ErrMessageTooLarge)
++ // to allow mime/multipart to detect it.
++ return m, errors.New("message too large")
++ }
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+ // Most headers aren't multi-valued.
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24534.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24534.patch
new file mode 100644
index 0000000000..d50db04bed
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24534.patch
@@ -0,0 +1,200 @@
+From d6759e7a059f4208f07aa781402841d7ddaaef96 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Fri, 10 Mar 2023 14:21:05 -0800
+Subject: [PATCH] [release-branch.go1.19] net/textproto: avoid overpredicting
+ the number of MIME header keys
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802452
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+(cherry picked from commit f739f080a72fd5b06d35c8e244165159645e2ed6)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802393
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: I675451438d619a9130360c56daf529559004903f
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481982
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/d6759e7a059f4208f07aa781402841d7ddaaef96]
+CVE: CVE-2023-24534
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/bytes/bytes.go | 13 +++++++
+ src/net/textproto/reader.go | 31 +++++++++++------
+ src/net/textproto/reader_test.go | 59 ++++++++++++++++++++++++++++++++
+ 3 files changed, 92 insertions(+), 11 deletions(-)
+
+diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go
+index e872cc2..1f0d760 100644
+--- a/src/bytes/bytes.go
++++ b/src/bytes/bytes.go
+@@ -1078,6 +1078,19 @@ func Index(s, sep []byte) int {
+ return -1
+ }
+
++// Cut slices s around the first instance of sep,
++// returning the text before and after sep.
++// The found result reports whether sep appears in s.
++// If sep does not appear in s, cut returns s, nil, false.
++//
++// Cut returns slices of the original slice s, not copies.
++func Cut(s, sep []byte) (before, after []byte, found bool) {
++ if i := Index(s, sep); i >= 0 {
++ return s[:i], s[i+len(sep):], true
++ }
++ return s, nil, false
++}
++
+ func indexRabinKarp(s, sep []byte) int {
+ // Rabin-Karp search
+ hashsep, pow := hashStr(sep)
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index a505da9..8d547fe 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -486,8 +487,11 @@ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+ var strs []string
+- hint := r.upcomingHeaderNewlines()
++ hint := r.upcomingHeaderKeys()
+ if hint > 0 {
++ if hint > 1000 {
++ hint = 1000 // set a cap to avoid overallocation
++ }
+ strs = make([]string, hint)
+ }
+
+@@ -562,9 +566,11 @@ func mustHaveFieldNameColon(line []byte) error {
+ return nil
+ }
+
+-// upcomingHeaderNewlines returns an approximation of the number of newlines
++var nl = []byte("\n")
++
++// upcomingHeaderKeys returns an approximation of the number of keys
+ // that will be in this header. If it gets confused, it returns 0.
+-func (r *Reader) upcomingHeaderNewlines() (n int) {
++func (r *Reader) upcomingHeaderKeys() (n int) {
+ // Try to determine the 'hint' size.
+ r.R.Peek(1) // force a buffer load if empty
+ s := r.R.Buffered()
+@@ -572,17 +578,20 @@ func (r *Reader) upcomingHeaderNewlines() (n int) {
+ return
+ }
+ peek, _ := r.R.Peek(s)
+- for len(peek) > 0 {
+- i := bytes.IndexByte(peek, '\n')
+- if i < 3 {
+- // Not present (-1) or found within the next few bytes,
+- // implying we're at the end ("\r\n\r\n" or "\n\n")
+- return
++ for len(peek) > 0 && n < 1000 {
++ var line []byte
++ line, peek, _ = bytes.Cut(peek, nl)
++ if len(line) == 0 || (len(line) == 1 && line[0] == '\r') {
++ // Blank line separating headers from the body.
++ break
++ }
++ if line[0] == ' ' || line[0] == '\t' {
++ // Folded continuation of the previous line.
++ continue
+ }
+ n++
+- peek = peek[i+1:]
+ }
+- return
++ return n
+ }
+
+ // CanonicalMIMEHeaderKey returns the canonical format of the
+diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go
+index 3124d43..3ae0de1 100644
+--- a/src/net/textproto/reader_test.go
++++ b/src/net/textproto/reader_test.go
+@@ -9,6 +9,7 @@ import (
+ "bytes"
+ "io"
+ "reflect"
++ "runtime"
+ "strings"
+ "testing"
+ )
+@@ -127,6 +128,42 @@ func TestReadMIMEHeaderSingle(t *testing.T) {
+ }
+ }
+
++// TestReaderUpcomingHeaderKeys is testing an internal function, but it's very
++// difficult to test well via the external API.
++func TestReaderUpcomingHeaderKeys(t *testing.T) {
++ for _, test := range []struct {
++ input string
++ want int
++ }{{
++ input: "",
++ want: 0,
++ }, {
++ input: "A: v",
++ want: 1,
++ }, {
++ input: "A: v\r\nB: v\r\n",
++ want: 2,
++ }, {
++ input: "A: v\nB: v\n",
++ want: 2,
++ }, {
++ input: "A: v\r\n continued\r\n still continued\r\nB: v\r\n\r\n",
++ want: 2,
++ }, {
++ input: "A: v\r\n\r\nB: v\r\nC: v\r\n",
++ want: 1,
++ }, {
++ input: "A: v" + strings.Repeat("\n", 1000),
++ want: 1,
++ }} {
++ r := reader(test.input)
++ got := r.upcomingHeaderKeys()
++ if test.want != got {
++ t.Fatalf("upcomingHeaderKeys(%q): %v; want %v", test.input, got, test.want)
++ }
++ }
++}
++
+ func TestReadMIMEHeaderNoKey(t *testing.T) {
+ r := reader(": bar\ntest-1: 1\n\n")
+ m, err := r.ReadMIMEHeader()
+@@ -223,6 +260,28 @@ func TestReadMIMEHeaderTrimContinued(t *testing.T) {
+ }
+ }
+
++// Test that reading a header doesn't overallocate. Issue 58975.
++func TestReadMIMEHeaderAllocations(t *testing.T) {
++ var totalAlloc uint64
++ const count = 200
++ for i := 0; i < count; i++ {
++ r := reader("A: b\r\n\r\n" + strings.Repeat("\n", 4096))
++ var m1, m2 runtime.MemStats
++ runtime.ReadMemStats(&m1)
++ _, err := r.ReadMIMEHeader()
++ if err != nil {
++ t.Fatalf("ReadMIMEHeader: %v", err)
++ }
++ runtime.ReadMemStats(&m2)
++ totalAlloc += m2.TotalAlloc - m1.TotalAlloc
++ }
++ // 32k is large and we actually allocate substantially less,
++ // but prior to the fix for #58975 we allocated ~400k in this case.
++ if got, want := totalAlloc/count, uint64(32768); got > want {
++ t.Fatalf("ReadMIMEHeader allocated %v bytes, want < %v", got, want)
++ }
++}
++
+ type readResponseTest struct {
+ in string
+ inCode int
+--
+2.25.1
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_1.patch
new file mode 100644
index 0000000000..39e1304fbd
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_1.patch
@@ -0,0 +1,134 @@
+From ef41a4e2face45e580c5836eaebd51629fc23f15 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 16 Mar 2023 14:18:04 -0700
+Subject: [PATCH] [release-branch.go1.19] mime/multipart: avoid excessive copy
+ buffer allocations in ReadForm
+
+When copying form data to disk with io.Copy,
+allocate only one copy buffer and reuse it rather than
+creating two buffers per file (one from io.multiReader.WriteTo,
+and a second one from os.File.ReadFrom).
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802453
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802395
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: Ie405470c92abffed3356913b37d813e982c96c8b
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481983
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/ef41a4e2face45e580c5836eaebd51629fc23f15]
+CVE: CVE-2023-24536
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 15 +++++++--
+ src/mime/multipart/formdata_test.go | 49 +++++++++++++++++++++++++++++
+ 2 files changed, 61 insertions(+), 3 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index a7d4ca97f0484..975dcb6b26db4 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -84,6 +84,7 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ maxMemoryBytes = math.MaxInt64
+ }
+ }
++ var copyBuf []byte
+ for {
+ p, err := r.nextPart(false, maxMemoryBytes)
+ if err == io.EOF {
+@@ -147,14 +148,22 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+ }
+ numDiskFiles++
+- size, err := io.Copy(file, io.MultiReader(&b, p))
++ if _, err := file.Write(b.Bytes()); err != nil {
++ return nil, err
++ }
++ if copyBuf == nil {
++ copyBuf = make([]byte, 32*1024) // same buffer size as io.Copy uses
++ }
++ // os.File.ReadFrom will allocate its own copy buffer if we let io.Copy use it.
++ type writerOnly struct{ io.Writer }
++ remainingSize, err := io.CopyBuffer(writerOnly{file}, p, copyBuf)
+ if err != nil {
+ return nil, err
+ }
+ fh.tmpfile = file.Name()
+- fh.Size = size
++ fh.Size = int64(b.Len()) + remainingSize
+ fh.tmpoff = fileOff
+- fileOff += size
++ fileOff += fh.Size
+ if !combineFiles {
+ if err := file.Close(); err != nil {
+ return nil, err
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 5cded7170c6b8..f5b56083b2377 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -368,3 +368,52 @@ func testReadFormManyFiles(t *testing.T, distinct bool) {
+ t.Fatalf("temp dir contains %v files; want 0", len(names))
+ }
+ }
++
++func BenchmarkReadForm(b *testing.B) {
++ for _, test := range []struct {
++ name string
++ form func(fw *Writer, count int)
++ }{{
++ name: "fields",
++ form: func(fw *Writer, count int) {
++ for i := 0; i < count; i++ {
++ w, _ := fw.CreateFormField(fmt.Sprintf("field%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ },
++ }, {
++ name: "files",
++ form: func(fw *Writer, count int) {
++ for i := 0; i < count; i++ {
++ w, _ := fw.CreateFormFile(fmt.Sprintf("field%v", i), fmt.Sprintf("file%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ },
++ }} {
++ b.Run(test.name, func(b *testing.B) {
++ for _, maxMemory := range []int64{
++ 0,
++ 1 << 20,
++ } {
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ test.form(fw, 10)
++ if err := fw.Close(); err != nil {
++ b.Fatal(err)
++ }
++ b.Run(fmt.Sprintf("maxMemory=%v", maxMemory), func(b *testing.B) {
++ b.ReportAllocs()
++ for i := 0; i < b.N; i++ {
++ fr := NewReader(bytes.NewReader(buf.Bytes()), fw.Boundary())
++ form, err := fr.ReadForm(maxMemory)
++ if err != nil {
++ b.Fatal(err)
++ }
++ form.RemoveAll()
++ }
++
++ })
++ }
++ })
++ }
++}
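A minimal standalone sketch of the buffer-reuse pattern this change applies inside readForm: hide the destination's ReadFrom method behind a writer-only wrapper so io.CopyBuffer keeps using one shared buffer instead of delegating to a ReadFrom implementation that allocates its own. The temp file, LimitReader and sample parts are illustrative scaffolding only, not taken from the patch.

package main

import (
	"io"
	"io/ioutil"
	"os"
	"strings"
)

// writerOnly hides the destination's ReadFrom method, forcing io.CopyBuffer
// to use the buffer it is given rather than delegating to a ReadFrom
// implementation that would allocate its own buffer per file.
type writerOnly struct{ io.Writer }

func main() {
	dst, err := ioutil.TempFile("", "copybuf")
	if err != nil {
		panic(err)
	}
	defer os.Remove(dst.Name())
	defer dst.Close()

	copyBuf := make([]byte, 32*1024) // one buffer, reused for every part
	for _, part := range []string{"first part", "second part"} {
		// LimitReader strips strings.Reader's WriteTo fast path; a real
		// multipart *Part has no such method, so readForm needs no such step.
		src := io.LimitReader(strings.NewReader(part), int64(len(part)))
		if _, err := io.CopyBuffer(writerOnly{dst}, src, copyBuf); err != nil {
			panic(err)
		}
	}
}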
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_2.patch
new file mode 100644
index 0000000000..9ba5114c82
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_2.patch
@@ -0,0 +1,184 @@
+From 7a359a651c7ebdb29e0a1c03102fce793e9f58f0 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 16 Mar 2023 16:56:12 -0700
+Subject: [PATCH] [release-branch.go1.19] net/textproto, mime/multipart:
+ improve accounting of non-file data
+
+For requests containing large numbers of small parts,
+memory consumption of a parsed form could be about 250%
+over the estimated size.
+
+When considering the size of parsed forms, account for the size of
+FileHeader structs and increase the estimate of memory consumed by
+map entries.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802454
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802396
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: I31bc50e9346b4eee6fbe51a18c3c57230cc066db
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481984
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/7a359a651c7ebdb29e0a1c03102fce793e9f58f0]
+CVE: CVE-2023-24536
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 9 +++--
+ src/mime/multipart/formdata_test.go | 55 ++++++++++++-----------------
+ src/net/textproto/reader.go | 8 ++++-
+ 3 files changed, 37 insertions(+), 35 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 975dcb6b26db4..3f6ff697ca608 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -103,8 +103,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ // Multiple values for the same key (one map entry, longer slice) are cheaper
+ // than the same number of values for different keys (many map entries), but
+ // using a consistent per-value cost for overhead is simpler.
++ const mapEntryOverhead = 200
+ maxMemoryBytes -= int64(len(name))
+- maxMemoryBytes -= 100 // map overhead
++ maxMemoryBytes -= mapEntryOverhead
+ if maxMemoryBytes < 0 {
+ // We can't actually take this path, since nextPart would already have
+ // rejected the MIME headers for being too large. Check anyway.
+@@ -128,7 +129,10 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+
+ // file, store in memory or on disk
++ const fileHeaderSize = 100
+ maxMemoryBytes -= mimeHeaderSize(p.Header)
++ maxMemoryBytes -= mapEntryOverhead
++ maxMemoryBytes -= fileHeaderSize
+ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
+@@ -183,9 +187,10 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+
+ func mimeHeaderSize(h textproto.MIMEHeader) (size int64) {
++ size = 400
+ for k, vs := range h {
+ size += int64(len(k))
+- size += 100 // map entry overhead
++ size += 200 // map entry overhead
+ for _, v := range vs {
+ size += int64(len(v))
+ }
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index f5b56083b2377..8ed26e0c34081 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -192,10 +192,10 @@ func (r *failOnReadAfterErrorReader) Read(p []byte) (n int, err error) {
+ // TestReadForm_NonFileMaxMemory asserts that the ReadForm maxMemory limit is applied
+ // while processing non-file form data as well as file form data.
+ func TestReadForm_NonFileMaxMemory(t *testing.T) {
+- n := 10<<20 + 25
+ if testing.Short() {
+- n = 10<<10 + 25
++ t.Skip("skipping in -short mode")
+ }
++ n := 10 << 20
+ largeTextValue := strings.Repeat("1", n)
+ message := `--MyBoundary
+ Content-Disposition: form-data; name="largetext"
+@@ -203,38 +203,29 @@ Content-Disposition: form-data; name="largetext"
+ ` + largeTextValue + `
+ --MyBoundary--
+ `
+-
+ testBody := strings.ReplaceAll(message, "\n", "\r\n")
+- testCases := []struct {
+- name string
+- maxMemory int64
+- err error
+- }{
+- {"smaller", 50 + int64(len("largetext")) + 100, nil},
+- {"exact-fit", 25 + int64(len("largetext")) + 100, nil},
+- {"too-large", 0, ErrMessageTooLarge},
+- }
+- for _, tc := range testCases {
+- t.Run(tc.name, func(t *testing.T) {
+- if tc.maxMemory == 0 && testing.Short() {
+- t.Skip("skipping in -short mode")
+- }
+- b := strings.NewReader(testBody)
+- r := NewReader(b, boundary)
+- f, err := r.ReadForm(tc.maxMemory)
+- if err == nil {
+- defer f.RemoveAll()
+- }
+- if tc.err != err {
+- t.Fatalf("ReadForm error - got: %v; expected: %v", err, tc.err)
+- }
+- if err == nil {
+- if g := f.Value["largetext"][0]; g != largeTextValue {
+- t.Errorf("largetext mismatch: got size: %v, expected size: %v", len(g), len(largeTextValue))
+- }
+- }
+- })
++ // Try parsing the form with increasing maxMemory values.
++ // Changes in how we account for non-file form data may cause the exact point
++ // where we change from rejecting the form as too large to accepting it to vary,
++ // but we should see both successes and failures.
++ const failWhenMaxMemoryLessThan = 128
++ for maxMemory := int64(0); maxMemory < failWhenMaxMemoryLessThan*2; maxMemory += 16 {
++ b := strings.NewReader(testBody)
++ r := NewReader(b, boundary)
++ f, err := r.ReadForm(maxMemory)
++ if err != nil {
++ continue
++ }
++ if g := f.Value["largetext"][0]; g != largeTextValue {
++ t.Errorf("largetext mismatch: got size: %v, expected size: %v", len(g), len(largeTextValue))
++ }
++ f.RemoveAll()
++ if maxMemory < failWhenMaxMemoryLessThan {
++ t.Errorf("ReadForm(%v): no error, expect to hit memory limit when maxMemory < %v", maxMemory, failWhenMaxMemoryLessThan)
++ }
++ return
+ }
++ t.Errorf("ReadForm(x) failed for x < 1024, expect success")
+ }
+
+ // TestReadForm_MetadataTooLarge verifies that we account for the size of field names,
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index 9a21777df8be0..c1284fde25eb7 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -503,6 +503,12 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+
+ m := make(MIMEHeader, hint)
+
++ // Account for 400 bytes of overhead for the MIMEHeader, plus 200 bytes per entry.
++ // Benchmarking map creation as of go1.20, a one-entry MIMEHeader is 416 bytes and large
++ // MIMEHeaders average about 200 bytes per entry.
++ lim -= 400
++ const mapEntryOverhead = 200
++
+ // The first line cannot start with a leading space.
+ if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
+ line, err := r.readLineSlice()
+@@ -538,7 +544,7 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ vv := m[key]
+ if vv == nil {
+ lim -= int64(len(key))
+- lim -= 100 // map entry overhead
++ lim -= mapEntryOverhead
+ }
+ lim -= int64(len(value))
+ if lim < 0 {
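A rough standalone sketch of the accounting described above: charge a fixed base cost for the header map, a per-entry overhead, and the bytes of every key and value against the caller's memory budget, then reject the message once the budget goes negative. The constants mirror the patch; the example header and budget value are arbitrary.

package main

import (
	"fmt"
	"net/textproto"
)

// headerSize mirrors the patch's estimate: a fixed base cost for the
// MIMEHeader map plus a per-entry overhead, plus the bytes of every
// key and value actually stored.
func headerSize(h textproto.MIMEHeader) (size int64) {
	size = 400 // base cost of the map
	for k, vs := range h {
		size += int64(len(k))
		size += 200 // map entry overhead
		for _, v := range vs {
			size += int64(len(v))
		}
	}
	return size
}

func main() {
	h := textproto.MIMEHeader{}
	h.Set("Content-Disposition", `form-data; name="file"; filename="file"`)
	h.Set("Content-Type", "application/octet-stream")

	budget := int64(1 << 10) // stands in for the maxMemory passed to ReadForm
	budget -= headerSize(h)
	if budget < 0 {
		fmt.Println("message too large")
		return
	}
	fmt.Println("remaining budget:", budget)
}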
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_3.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_3.patch
new file mode 100644
index 0000000000..58c0a484ee
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24536_3.patch
@@ -0,0 +1,349 @@
+From 7917b5f31204528ea72e0629f0b7d52b35b27538 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 20 Mar 2023 10:43:19 -0700
+Subject: [PATCH] [release-branch.go1.19] mime/multipart: limit parsed mime message sizes
+
+The parsed forms of MIME headers and multipart forms can consume
+substantially more memory than the size of the input data.
+A malicious input containing a very large number of headers or
+form parts can cause excessively large memory allocations.
+
+Set limits on the size of MIME data:
+
+Reader.NextPart and Reader.NextRawPart limit the number
+of headers in a part to 10000.
+
+Reader.ReadForm limits the total number of headers in all
+FileHeaders to 10000.
+
+Both of these limits may be set with
+GODEBUG=multipartmaxheaders=<value>.
+
+Reader.ReadForm limits the number of parts in a form to 1000.
+This limit may be set with GODEBUG=multipartmaxparts=<value>.
+
+Thanks to Jakob Ackermann (@das7pad) for reporting this issue.
+
+For CVE-2023-24536
+For #59153
+For #59269
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802455
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1801087
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: If134890d75f0d95c681d67234daf191ba08e6424
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481985
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/7917b5f31204528ea72e0629f0b7d52b35b27538]
+CVE: CVE-2023-24536
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata.go | 19 ++++++++-
+ src/mime/multipart/formdata_test.go | 61 ++++++++++++++++++++++++++++
+ src/mime/multipart/multipart.go | 31 ++++++++++----
+ src/mime/multipart/readmimeheader.go | 2 +-
+ src/net/textproto/reader.go | 19 +++++----
+ 5 files changed, 115 insertions(+), 17 deletions(-)
+
+diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go
+index 216cccb..0b508ae 100644
+--- a/src/mime/multipart/formdata.go
++++ b/src/mime/multipart/formdata.go
+@@ -13,6 +13,7 @@ import (
+ "math"
+ "net/textproto"
+ "os"
++ "strconv"
+ )
+
+ // ErrMessageTooLarge is returned by ReadForm if the message form
+@@ -42,6 +43,15 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ numDiskFiles := 0
+ multipartFiles := godebug.Get("multipartfiles")
+ combineFiles := multipartFiles != "distinct"
++ maxParts := 1000
++ multipartMaxParts := godebug.Get("multipartmaxparts")
++ if multipartMaxParts != "" {
++ if v, err := strconv.Atoi(multipartMaxParts); err == nil && v >= 0 {
++ maxParts = v
++ }
++ }
++ maxHeaders := maxMIMEHeaders()
++
+ defer func() {
+ if file != nil {
+ if cerr := file.Close(); err == nil {
+@@ -87,13 +97,17 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ }
+ var copyBuf []byte
+ for {
+- p, err := r.nextPart(false, maxMemoryBytes)
++ p, err := r.nextPart(false, maxMemoryBytes, maxHeaders)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
++ if maxParts <= 0 {
++ return nil, ErrMessageTooLarge
++ }
++ maxParts--
+
+ name := p.FormName()
+ if name == "" {
+@@ -137,6 +151,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
+ if maxMemoryBytes < 0 {
+ return nil, ErrMessageTooLarge
+ }
++ for _, v := range p.Header {
++ maxHeaders -= int64(len(v))
++ }
+ fh := &FileHeader{
+ Filename: filename,
+ Header: p.Header,
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index 8ed26e0..c78eeb7 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -360,6 +360,67 @@ func testReadFormManyFiles(t *testing.T, distinct bool) {
+ }
+ }
+
++func TestReadFormLimits(t *testing.T) {
++ for _, test := range []struct {
++ values int
++ files int
++ extraKeysPerFile int
++ wantErr error
++ godebug string
++ }{
++ {values: 1000},
++ {values: 1001, wantErr: ErrMessageTooLarge},
++ {values: 500, files: 500},
++ {values: 501, files: 500, wantErr: ErrMessageTooLarge},
++ {files: 1000},
++ {files: 1001, wantErr: ErrMessageTooLarge},
++ {files: 1, extraKeysPerFile: 9998}, // plus Content-Disposition and Content-Type
++ {files: 1, extraKeysPerFile: 10000, wantErr: ErrMessageTooLarge},
++ {godebug: "multipartmaxparts=100", values: 100},
++ {godebug: "multipartmaxparts=100", values: 101, wantErr: ErrMessageTooLarge},
++ {godebug: "multipartmaxheaders=100", files: 2, extraKeysPerFile: 48},
++ {godebug: "multipartmaxheaders=100", files: 2, extraKeysPerFile: 50, wantErr: ErrMessageTooLarge},
++ } {
++ name := fmt.Sprintf("values=%v/files=%v/extraKeysPerFile=%v", test.values, test.files, test.extraKeysPerFile)
++ if test.godebug != "" {
++ name += fmt.Sprintf("/godebug=%v", test.godebug)
++ }
++ t.Run(name, func(t *testing.T) {
++ if test.godebug != "" {
++ t.Setenv("GODEBUG", test.godebug)
++ }
++ var buf bytes.Buffer
++ fw := NewWriter(&buf)
++ for i := 0; i < test.values; i++ {
++ w, _ := fw.CreateFormField(fmt.Sprintf("field%v", i))
++ fmt.Fprintf(w, "value %v", i)
++ }
++ for i := 0; i < test.files; i++ {
++ h := make(textproto.MIMEHeader)
++ h.Set("Content-Disposition",
++ fmt.Sprintf(`form-data; name="file%v"; filename="file%v"`, i, i))
++ h.Set("Content-Type", "application/octet-stream")
++ for j := 0; j < test.extraKeysPerFile; j++ {
++ h.Set(fmt.Sprintf("k%v", j), "v")
++ }
++ w, _ := fw.CreatePart(h)
++ fmt.Fprintf(w, "value %v", i)
++ }
++ if err := fw.Close(); err != nil {
++ t.Fatal(err)
++ }
++ fr := NewReader(bytes.NewReader(buf.Bytes()), fw.Boundary())
++ form, err := fr.ReadForm(1 << 10)
++ if err == nil {
++ defer form.RemoveAll()
++ }
++ if err != test.wantErr {
++ t.Errorf("ReadForm = %v, want %v", err, test.wantErr)
++ }
++ })
++ }
++}
++
+ func BenchmarkReadForm(b *testing.B) {
+ for _, test := range []struct {
+ name string
+diff --git a/src/mime/multipart/multipart.go b/src/mime/multipart/multipart.go
+index 958cef8..94464a8 100644
+--- a/src/mime/multipart/multipart.go
++++ b/src/mime/multipart/multipart.go
+@@ -16,11 +16,13 @@ import (
+ "bufio"
+ "bytes"
+ "fmt"
++ "internal/godebug"
+ "io"
+ "io/ioutil"
+ "mime"
+ "mime/quotedprintable"
+ "net/textproto"
++ "strconv"
+ "strings"
+ )
+
+@@ -121,12 +123,12 @@ func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
+ return n, r.err
+ }
+
+-func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
++func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize, maxMIMEHeaders int64) (*Part, error) {
+ bp := &Part{
+ Header: make(map[string][]string),
+ mr: mr,
+ }
+- if err := bp.populateHeaders(maxMIMEHeaderSize); err != nil {
++ if err := bp.populateHeaders(maxMIMEHeaderSize, maxMIMEHeaders); err != nil {
+ return nil, err
+ }
+ bp.r = partReader{bp}
+@@ -142,9 +144,9 @@ func newPart(mr *Reader, rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
+ return bp, nil
+ }
+
+-func (bp *Part) populateHeaders(maxMIMEHeaderSize int64) error {
++func (bp *Part) populateHeaders(maxMIMEHeaderSize, maxMIMEHeaders int64) error {
+ r := textproto.NewReader(bp.mr.bufReader)
+- header, err := readMIMEHeader(r, maxMIMEHeaderSize)
++ header, err := readMIMEHeader(r, maxMIMEHeaderSize, maxMIMEHeaders)
+ if err == nil {
+ bp.Header = header
+ }
+@@ -306,6 +308,19 @@ type Reader struct {
+ // including header keys, values, and map overhead.
+ const maxMIMEHeaderSize = 10 << 20
+
++func maxMIMEHeaders() int64 {
++ // multipartMaxHeaders is the maximum number of header entries NextPart will return,
++ // as well as the maximum combined total of header entries Reader.ReadForm will return
++ // in FileHeaders.
++ multipartMaxHeaders := godebug.Get("multipartmaxheaders")
++ if multipartMaxHeaders != "" {
++ if v, err := strconv.ParseInt(multipartMaxHeaders, 10, 64); err == nil && v >= 0 {
++ return v
++ }
++ }
++ return 10000
++}
++
+ // NextPart returns the next part in the multipart or an error.
+ // When there are no more parts, the error io.EOF is returned.
+ //
+@@ -313,7 +328,7 @@ const maxMIMEHeaderSize = 10 << 20
+ // has a value of "quoted-printable", that header is instead
+ // hidden and the body is transparently decoded during Read calls.
+ func (r *Reader) NextPart() (*Part, error) {
+- return r.nextPart(false, maxMIMEHeaderSize)
++ return r.nextPart(false, maxMIMEHeaderSize, maxMIMEHeaders())
+ }
+
+ // NextRawPart returns the next part in the multipart or an error.
+@@ -322,10 +337,10 @@ func (r *Reader) NextPart() (*Part, error) {
+ // Unlike NextPart, it does not have special handling for
+ // "Content-Transfer-Encoding: quoted-printable".
+ func (r *Reader) NextRawPart() (*Part, error) {
+- return r.nextPart(true, maxMIMEHeaderSize)
++ return r.nextPart(true, maxMIMEHeaderSize, maxMIMEHeaders())
+ }
+
+-func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error) {
++func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize, maxMIMEHeaders int64) (*Part, error) {
+ if r.currentPart != nil {
+ r.currentPart.Close()
+ }
+@@ -350,7 +365,7 @@ func (r *Reader) nextPart(rawPart bool, maxMIMEHeaderSize int64) (*Part, error)
+
+ if r.isBoundaryDelimiterLine(line) {
+ r.partsRead++
+- bp, err := newPart(r, rawPart, maxMIMEHeaderSize)
++ bp, err := newPart(r, rawPart, maxMIMEHeaderSize, maxMIMEHeaders)
+ if err != nil {
+ return nil, err
+ }
+diff --git a/src/mime/multipart/readmimeheader.go b/src/mime/multipart/readmimeheader.go
+index 6836928..25aa6e2 100644
+--- a/src/mime/multipart/readmimeheader.go
++++ b/src/mime/multipart/readmimeheader.go
+@@ -11,4 +11,4 @@ import (
+ // readMIMEHeader is defined in package net/textproto.
+ //
+ //go:linkname readMIMEHeader net/textproto.readMIMEHeader
+-func readMIMEHeader(r *textproto.Reader, lim int64) (textproto.MIMEHeader, error)
++func readMIMEHeader(r *textproto.Reader, maxMemory, maxHeaders int64) (textproto.MIMEHeader, error)
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index 1c79f0a..ad2d777 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -484,12 +484,12 @@ func (r *Reader) ReadDotLines() ([]string, error) {
+ // }
+ //
+ func (r *Reader) ReadMIMEHeader() (MIMEHeader, error) {
+- return readMIMEHeader(r, math.MaxInt64)
++ return readMIMEHeader(r, math.MaxInt64, math.MaxInt64)
+ }
+
+ // readMIMEHeader is a version of ReadMIMEHeader which takes a limit on the header size.
+ // It is called by the mime/multipart package.
+-func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
++func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error) {
+ // Avoid lots of small slice allocations later by allocating one
+ // large one ahead of time which we'll cut up into smaller
+ // slices. If this isn't big enough later, we allocate small ones.
+@@ -507,7 +507,7 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ // Account for 400 bytes of overhead for the MIMEHeader, plus 200 bytes per entry.
+ // Benchmarking map creation as of go1.20, a one-entry MIMEHeader is 416 bytes and large
+ // MIMEHeaders average about 200 bytes per entry.
+- lim -= 400
++ maxMemory -= 400
+ const mapEntryOverhead = 200
+
+ // The first line cannot start with a leading space.
+@@ -539,6 +539,11 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+ continue
+ }
+
++ maxHeaders--
++ if maxHeaders < 0 {
++ return nil, errors.New("message too large")
++ }
++
+ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
+ //
+ // value is computed as
+@@ -557,11 +562,11 @@ func readMIMEHeader(r *Reader, lim int64) (MIMEHeader, error) {
+
+ vv := m[key]
+ if vv == nil {
+- lim -= int64(len(key))
+- lim -= mapEntryOverhead
++ maxMemory -= int64(len(key))
++ maxMemory -= mapEntryOverhead
+ }
+- lim -= int64(len(value))
+- if lim < 0 {
++ maxMemory -= int64(len(value))
++ if maxMemory < 0 {
+ // TODO: This should be a distinguishable error (ErrMessageTooLarge)
+ // to allow mime/multipart to detect it.
+ return m, errors.New("message too large")
+--
+2.25.1
+
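The user-visible effect of these limits, sketched as a small program built with a toolchain carrying this backport (the 1001-part count is chosen only to cross the default 1000-part cap):

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	for i := 0; i < 1001; i++ { // one part over the default 1000-part limit
		fw, _ := w.CreateFormField(fmt.Sprintf("field%d", i))
		fmt.Fprint(fw, "v")
	}
	w.Close()

	r := multipart.NewReader(bytes.NewReader(buf.Bytes()), w.Boundary())
	_, err := r.ReadForm(1 << 20)
	fmt.Println(err) // with the patch applied: multipart: message too large

	// The defaults can be adjusted per process, e.g.:
	//   GODEBUG=multipartmaxparts=5000,multipartmaxheaders=20000 ./program
}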
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24537.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24537.patch
new file mode 100644
index 0000000000..e04b717fc1
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24537.patch
@@ -0,0 +1,76 @@
+From bf8c7c575c8a552d9d79deb29e80854dc88528d0 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Mon, 20 Mar 2023 10:43:19 -0700
+Subject: [PATCH] [release-branch.go1.20] go/scanner: reject large line and
+ column numbers in //line directives
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802456
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802611
+Reviewed-by: Damien Neil <dneil@google.com>
+Change-Id: Ifdfa192d54f722d781a4d8c5f35b5fb72d122168
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481986
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/126a1d02da82f93ede7ce0bd8d3c51ef627f2104]
+CVE: CVE-2023-24537
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/go/parser/parser_test.go | 16 ++++++++++++++++
+ src/go/scanner/scanner.go | 5 ++++-
+ 2 files changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
+index 37a6a2b..714557c 100644
+--- a/src/go/parser/parser_test.go
++++ b/src/go/parser/parser_test.go
+@@ -738,3 +738,19 @@ func TestScopeDepthLimit(t *testing.T) {
+ }
+ }
+ }
++
++// TestIssue59180 tests that line number overflow doesn't cause an infinite loop.
++func TestIssue59180(t *testing.T) {
++ testcases := []string{
++ "package p\n//line :9223372036854775806\n\n//",
++ "package p\n//line :1:9223372036854775806\n\n//",
++ "package p\n//line file:9223372036854775806\n\n//",
++ }
++
++ for _, src := range testcases {
++ _, err := ParseFile(token.NewFileSet(), "", src, ParseComments)
++ if err == nil {
++ t.Errorf("ParseFile(%s) succeeded unexpectedly", src)
++ }
++ }
++}
+diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go
+index 00fe2dc..3159d25 100644
+--- a/src/go/scanner/scanner.go
++++ b/src/go/scanner/scanner.go
+@@ -246,13 +246,16 @@ func (s *Scanner) updateLineInfo(next, offs int, text []byte) {
+ return
+ }
+
++ // Put a cap on the maximum size of line and column numbers.
++ // 30 bits allows for some additional space before wrapping an int32.
++ const maxLineCol = 1<<30 - 1
+ var line, col int
+ i2, n2, ok2 := trailingDigits(text[:i-1])
+ if ok2 {
+ //line filename:line:col
+ i, i2 = i2, i
+ line, col = n2, n
+- if col == 0 {
++ if col == 0 || col > maxLineCol {
+ s.error(offs+i2, "invalid column number: "+string(text[i2:]))
+ return
+ }
+--
+2.25.1
+
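A small sketch of the rejected input, mirroring one of the test cases added above (assumes a toolchain with this backport; the test comment records that such inputs previously caused an infinite loop):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	// A //line directive whose column number is near the int64 maximum.
	// With the cap above, the directive is rejected ("invalid column number")
	// and ParseFile returns an error instead of looping.
	src := "package p\n//line :1:9223372036854775806\n\n//"
	_, err := parser.ParseFile(token.NewFileSet(), "", src, parser.ParseComments)
	fmt.Println(err)
}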
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-1.patch
new file mode 100644
index 0000000000..23c5075e41
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-1.patch
@@ -0,0 +1,125 @@
+From 8acd01094d9ee17f6e763a61e49a8a808b3a9ddb Mon Sep 17 00:00:00 2001
+From: Brad Fitzpatrick <bradfitz@golang.org>
+Date: Mon, 2 Aug 2021 14:55:51 -0700
+Subject: [PATCH 1/6] net/netip: add new IP address package
+
+Co-authored-by: Alex Willmer <alex@moreati.org.uk> (GitHub @moreati)
+Co-authored-by: Alexander Yastrebov <yastrebov.alex@gmail.com>
+Co-authored-by: David Anderson <dave@natulte.net> (Tailscale CLA)
+Co-authored-by: David Crawshaw <crawshaw@tailscale.com> (Tailscale CLA)
+Co-authored-by: Dmytro Shynkevych <dmytro@tailscale.com> (Tailscale CLA)
+Co-authored-by: Elias Naur <mail@eliasnaur.com>
+Co-authored-by: Joe Tsai <joetsai@digital-static.net> (Tailscale CLA)
+Co-authored-by: Jonathan Yu <jawnsy@cpan.org> (GitHub @jawnsy)
+Co-authored-by: Josh Bleecher Snyder <josharian@gmail.com> (Tailscale CLA)
+Co-authored-by: Maisem Ali <maisem@tailscale.com> (Tailscale CLA)
+Co-authored-by: Manuel Mendez (Go AUTHORS mmendez534@...)
+Co-authored-by: Matt Layher <mdlayher@gmail.com>
+Co-authored-by: Noah Treuhaft <noah.treuhaft@gmail.com> (GitHub @nwt)
+Co-authored-by: Stefan Majer <stefan.majer@gmail.com>
+Co-authored-by: Terin Stock <terinjokes@gmail.com> (Cloudflare CLA)
+Co-authored-by: Tobias Klauser <tklauser@distanz.ch>
+
+Fixes #46518
+
+Change-Id: I0041f9e1115d61fa6e95fcf32b01d9faee708712
+Reviewed-on: https://go-review.googlesource.com/c/go/+/339309
+Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Russ Cox <rsc@golang.org>
+Trust: Brad Fitzpatrick <bradfitz@golang.org>
+
+Dependency Patch #1
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/a59e33224e42d60a97fa720a45e1b74eb6aaa3d0
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/internal/godebug/godebug.go | 34 ++++++++++++++++++++++++++++++++++
+ src/internal/godebug/godebug_test.go | 34 ++++++++++++++++++++++++++++++++++
+ 2 files changed, 68 insertions(+)
+ create mode 100644 src/internal/godebug/godebug.go
+ create mode 100644 src/internal/godebug/godebug_test.go
+
+diff --git a/src/internal/godebug/godebug.go b/src/internal/godebug/godebug.go
+new file mode 100644
+index 0000000..ac434e5
+--- /dev/null
++++ b/src/internal/godebug/godebug.go
+@@ -0,0 +1,34 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// Package godebug parses the GODEBUG environment variable.
++package godebug
++
++import "os"
++
++// Get returns the value for the provided GODEBUG key.
++func Get(key string) string {
++ return get(os.Getenv("GODEBUG"), key)
++}
++
++// get returns the value part of key=value in s (a GODEBUG value).
++func get(s, key string) string {
++ for i := 0; i < len(s)-len(key)-1; i++ {
++ if i > 0 && s[i-1] != ',' {
++ continue
++ }
++ afterKey := s[i+len(key):]
++ if afterKey[0] != '=' || s[i:i+len(key)] != key {
++ continue
++ }
++ val := afterKey[1:]
++ for i, b := range val {
++ if b == ',' {
++ return val[:i]
++ }
++ }
++ return val
++ }
++ return ""
++}
+diff --git a/src/internal/godebug/godebug_test.go b/src/internal/godebug/godebug_test.go
+new file mode 100644
+index 0000000..41b9117
+--- /dev/null
++++ b/src/internal/godebug/godebug_test.go
+@@ -0,0 +1,34 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package godebug
++
++import "testing"
++
++func TestGet(t *testing.T) {
++ tests := []struct {
++ godebug string
++ key string
++ want string
++ }{
++ {"", "", ""},
++ {"", "foo", ""},
++ {"foo=bar", "foo", "bar"},
++ {"foo=bar,after=x", "foo", "bar"},
++ {"before=x,foo=bar,after=x", "foo", "bar"},
++ {"before=x,foo=bar", "foo", "bar"},
++ {",,,foo=bar,,,", "foo", "bar"},
++ {"foodecoy=wrong,foo=bar", "foo", "bar"},
++ {"foo=", "foo", ""},
++ {"foo", "foo", ""},
++ {",foo", "foo", ""},
++ {"foo=bar,baz", "loooooooong", ""},
++ }
++ for _, tt := range tests {
++ got := get(tt.godebug, tt.key)
++ if got != tt.want {
++ t.Errorf("get(%q, %q) = %q; want %q", tt.godebug, tt.key, got, tt.want)
++ }
++ }
++}
+--
+2.7.4
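For illustration, the GODEBUG format this helper parses is a comma-separated list of key=value pairs. internal/godebug is not importable from user code, so the lookup helper below is hypothetical; it only mimics the same first-match semantics to show how a setting such as multipartmaxparts would be read.

package main

import (
	"fmt"
	"os"
	"strings"
)

// lookup mimics the backported get(): return the value of key inside a
// comma-separated GODEBUG string, or "" if the key is absent.
func lookup(godebug, key string) string {
	for _, kv := range strings.Split(godebug, ",") {
		if strings.HasPrefix(kv, key+"=") {
			return strings.TrimPrefix(kv, key+"=")
		}
	}
	return ""
}

func main() {
	os.Setenv("GODEBUG", "multipartfiles=distinct,multipartmaxparts=500")
	fmt.Println(lookup(os.Getenv("GODEBUG"), "multipartmaxparts")) // prints 500
}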
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-2.patch
new file mode 100644
index 0000000000..f200c41e16
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538-2.patch
@@ -0,0 +1,635 @@
+From 6fc21505614f36178df0dad7034b6b8e3f7588d5 Mon Sep 17 00:00:00 2001
+From: empijei <robclap8@gmail.com>
+Date: Fri, 27 Mar 2020 19:27:55 +0100
+Subject: [PATCH 2/6] html/template,text/template: switch to Unicode escapes
+ for JSON compatibility
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The existing implementation is not compatible with JSON
+escape as it uses hex escaping.
+Unicode escape, instead, is valid for both JSON and JS.
+This fix avoids creating a separate escaping context for
+scripts of type "application/ld+json" and it is more
+future-proof in case more JSON+JS contexts get added
+to the platform (e.g. import maps).
+
+Fixes #33671
+Fixes #37634
+
+Change-Id: Id6f6524b4abc52e81d9d744d46bbe5bf2e081543
+Reviewed-on: https://go-review.googlesource.com/c/go/+/226097
+Reviewed-by: Carl Johnson <me@carlmjohnson.net>
+Reviewed-by: Daniel Martí <mvdan@mvdan.cc>
+Run-TryBot: Daniel Martí <mvdan@mvdan.cc>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+
+Dependency Patch #2
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/d4d298040d072ddacea0e0d6b55fb148fff18070
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/html/template/content_test.go | 70 +++++++++++++++++++-------------------
+ src/html/template/escape_test.go | 6 ++--
+ src/html/template/example_test.go | 6 ++--
+ src/html/template/js.go | 70 +++++++++++++++++++++++---------------
+ src/html/template/js_test.go | 68 ++++++++++++++++++------------------
+ src/html/template/template_test.go | 39 +++++++++++++++++++++
+ src/text/template/exec_test.go | 6 ++--
+ src/text/template/funcs.go | 8 ++---
+ 8 files changed, 163 insertions(+), 110 deletions(-)
+
+diff --git a/src/html/template/content_test.go b/src/html/template/content_test.go
+index 72d56f5..bd86527 100644
+--- a/src/html/template/content_test.go
++++ b/src/html/template/content_test.go
+@@ -18,7 +18,7 @@ func TestTypedContent(t *testing.T) {
+ HTML(`Hello, <b>World</b> &amp;tc!`),
+ HTMLAttr(` dir="ltr"`),
+ JS(`c && alert("Hello, World!");`),
+- JSStr(`Hello, World & O'Reilly\x21`),
++ JSStr(`Hello, World & O'Reilly\u0021`),
+ URL(`greeting=H%69,&addressee=(World)`),
+ Srcset(`greeting=H%69,&addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`),
+ URL(`,foo/,`),
+@@ -70,7 +70,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello, <b>World</b> &amp;tc!`,
+ ` dir=&#34;ltr&#34;`,
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+- `Hello, World &amp; O&#39;Reilly\x21`,
++ `Hello, World &amp; O&#39;Reilly\u0021`,
+ `greeting=H%69,&amp;addressee=(World)`,
+ `greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+ `,foo/,`,
+@@ -100,7 +100,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello,&#32;World&#32;&amp;tc!`,
+ `&#32;dir&#61;&#34;ltr&#34;`,
+ `c&#32;&amp;&amp;&#32;alert(&#34;Hello,&#32;World!&#34;);`,
+- `Hello,&#32;World&#32;&amp;&#32;O&#39;Reilly\x21`,
++ `Hello,&#32;World&#32;&amp;&#32;O&#39;Reilly\u0021`,
+ `greeting&#61;H%69,&amp;addressee&#61;(World)`,
+ `greeting&#61;H%69,&amp;addressee&#61;(World)&#32;2x,&#32;https://golang.org/favicon.ico&#32;500.5w`,
+ `,foo/,`,
+@@ -115,7 +115,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello, World &amp;tc!`,
+ ` dir=&#34;ltr&#34;`,
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+- `Hello, World &amp; O&#39;Reilly\x21`,
++ `Hello, World &amp; O&#39;Reilly\u0021`,
+ `greeting=H%69,&amp;addressee=(World)`,
+ `greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+ `,foo/,`,
+@@ -130,7 +130,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello, &lt;b&gt;World&lt;/b&gt; &amp;tc!`,
+ ` dir=&#34;ltr&#34;`,
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+- `Hello, World &amp; O&#39;Reilly\x21`,
++ `Hello, World &amp; O&#39;Reilly\u0021`,
+ `greeting=H%69,&amp;addressee=(World)`,
+ `greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+ `,foo/,`,
+@@ -146,7 +146,7 @@ func TestTypedContent(t *testing.T) {
+ // Not escaped.
+ `c && alert("Hello, World!");`,
+ // Escape sequence not over-escaped.
+- `"Hello, World & O'Reilly\x21"`,
++ `"Hello, World & O'Reilly\u0021"`,
+ `"greeting=H%69,\u0026addressee=(World)"`,
+ `"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
+ `",foo/,"`,
+@@ -162,7 +162,7 @@ func TestTypedContent(t *testing.T) {
+ // Not JS escaped but HTML escaped.
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+ // Escape sequence not over-escaped.
+- `&#34;Hello, World &amp; O&#39;Reilly\x21&#34;`,
++ `&#34;Hello, World &amp; O&#39;Reilly\u0021&#34;`,
+ `&#34;greeting=H%69,\u0026addressee=(World)&#34;`,
+ `&#34;greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w&#34;`,
+ `&#34;,foo/,&#34;`,
+@@ -171,30 +171,30 @@ func TestTypedContent(t *testing.T) {
+ {
+ `<script>alert("{{.}}")</script>`,
+ []string{
+- `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
+- `a[href =~ \x22\/\/example.com\x22]#foo`,
+- `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
+- ` dir=\x22ltr\x22`,
+- `c \x26\x26 alert(\x22Hello, World!\x22);`,
++ `\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
++ `a[href =~ \u0022\/\/example.com\u0022]#foo`,
++ `Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
++ ` dir=\u0022ltr\u0022`,
++ `c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+ // Escape sequence not over-escaped.
+- `Hello, World \x26 O\x27Reilly\x21`,
+- `greeting=H%69,\x26addressee=(World)`,
+- `greeting=H%69,\x26addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
++ `Hello, World \u0026 O\u0027Reilly\u0021`,
++ `greeting=H%69,\u0026addressee=(World)`,
++ `greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+ `,foo\/,`,
+ },
+ },
+ {
+ `<script type="text/javascript">alert("{{.}}")</script>`,
+ []string{
+- `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
+- `a[href =~ \x22\/\/example.com\x22]#foo`,
+- `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
+- ` dir=\x22ltr\x22`,
+- `c \x26\x26 alert(\x22Hello, World!\x22);`,
++ `\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
++ `a[href =~ \u0022\/\/example.com\u0022]#foo`,
++ `Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
++ ` dir=\u0022ltr\u0022`,
++ `c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+ // Escape sequence not over-escaped.
+- `Hello, World \x26 O\x27Reilly\x21`,
+- `greeting=H%69,\x26addressee=(World)`,
+- `greeting=H%69,\x26addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
++ `Hello, World \u0026 O\u0027Reilly\u0021`,
++ `greeting=H%69,\u0026addressee=(World)`,
++ `greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+ `,foo\/,`,
+ },
+ },
+@@ -208,7 +208,7 @@ func TestTypedContent(t *testing.T) {
+ // Not escaped.
+ `c && alert("Hello, World!");`,
+ // Escape sequence not over-escaped.
+- `"Hello, World & O'Reilly\x21"`,
++ `"Hello, World & O'Reilly\u0021"`,
+ `"greeting=H%69,\u0026addressee=(World)"`,
+ `"greeting=H%69,\u0026addressee=(World) 2x, https://golang.org/favicon.ico 500.5w"`,
+ `",foo/,"`,
+@@ -224,7 +224,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello, <b>World</b> &amp;tc!`,
+ ` dir=&#34;ltr&#34;`,
+ `c &amp;&amp; alert(&#34;Hello, World!&#34;);`,
+- `Hello, World &amp; O&#39;Reilly\x21`,
++ `Hello, World &amp; O&#39;Reilly\u0021`,
+ `greeting=H%69,&amp;addressee=(World)`,
+ `greeting=H%69,&amp;addressee=(World) 2x, https://golang.org/favicon.ico 500.5w`,
+ `,foo/,`,
+@@ -233,15 +233,15 @@ func TestTypedContent(t *testing.T) {
+ {
+ `<button onclick='alert("{{.}}")'>`,
+ []string{
+- `\x3cb\x3e \x22foo%\x22 O\x27Reilly \x26bar;`,
+- `a[href =~ \x22\/\/example.com\x22]#foo`,
+- `Hello, \x3cb\x3eWorld\x3c\/b\x3e \x26amp;tc!`,
+- ` dir=\x22ltr\x22`,
+- `c \x26\x26 alert(\x22Hello, World!\x22);`,
++ `\u003cb\u003e \u0022foo%\u0022 O\u0027Reilly \u0026bar;`,
++ `a[href =~ \u0022\/\/example.com\u0022]#foo`,
++ `Hello, \u003cb\u003eWorld\u003c\/b\u003e \u0026amp;tc!`,
++ ` dir=\u0022ltr\u0022`,
++ `c \u0026\u0026 alert(\u0022Hello, World!\u0022);`,
+ // Escape sequence not over-escaped.
+- `Hello, World \x26 O\x27Reilly\x21`,
+- `greeting=H%69,\x26addressee=(World)`,
+- `greeting=H%69,\x26addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
++ `Hello, World \u0026 O\u0027Reilly\u0021`,
++ `greeting=H%69,\u0026addressee=(World)`,
++ `greeting=H%69,\u0026addressee=(World) 2x, https:\/\/golang.org\/favicon.ico 500.5w`,
+ `,foo\/,`,
+ },
+ },
+@@ -253,7 +253,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+ `%20dir%3d%22ltr%22`,
+ `c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+- `Hello%2c%20World%20%26%20O%27Reilly%5cx21`,
++ `Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
+ // Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is done.
+ `greeting=H%69,&amp;addressee=%28World%29`,
+ `greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
+@@ -268,7 +268,7 @@ func TestTypedContent(t *testing.T) {
+ `Hello%2c%20%3cb%3eWorld%3c%2fb%3e%20%26amp%3btc%21`,
+ `%20dir%3d%22ltr%22`,
+ `c%20%26%26%20alert%28%22Hello%2c%20World%21%22%29%3b`,
+- `Hello%2c%20World%20%26%20O%27Reilly%5cx21`,
++ `Hello%2c%20World%20%26%20O%27Reilly%5cu0021`,
+ // Quotes and parens are escaped but %69 is not over-escaped. HTML escaping is not done.
+ `greeting=H%69,&addressee=%28World%29`,
+ `greeting%3dH%2569%2c%26addressee%3d%28World%29%202x%2c%20https%3a%2f%2fgolang.org%2ffavicon.ico%20500.5w`,
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index e72a9ba..c709660 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -238,7 +238,7 @@ func TestEscape(t *testing.T) {
+ {
+ "jsStr",
+ "<button onclick='alert(&quot;{{.H}}&quot;)'>",
+- `<button onclick='alert(&quot;\x3cHello\x3e&quot;)'>`,
++ `<button onclick='alert(&quot;\u003cHello\u003e&quot;)'>`,
+ },
+ {
+ "badMarshaler",
+@@ -259,7 +259,7 @@ func TestEscape(t *testing.T) {
+ {
+ "jsRe",
+ `<button onclick='alert(/{{"foo+bar"}}/.test(""))'>`,
+- `<button onclick='alert(/foo\x2bbar/.test(""))'>`,
++ `<button onclick='alert(/foo\u002bbar/.test(""))'>`,
+ },
+ {
+ "jsReBlank",
+@@ -825,7 +825,7 @@ func TestEscapeSet(t *testing.T) {
+ "main": `<button onclick="title='{{template "helper"}}'; ...">{{template "helper"}}</button>`,
+ "helper": `{{11}} of {{"<100>"}}`,
+ },
+- `<button onclick="title='11 of \x3c100\x3e'; ...">11 of &lt;100&gt;</button>`,
++ `<button onclick="title='11 of \u003c100\u003e'; ...">11 of &lt;100&gt;</button>`,
+ },
+ // A non-recursive template that ends in a different context.
+ // helper starts in jsCtxRegexp and ends in jsCtxDivOp.
+diff --git a/src/html/template/example_test.go b/src/html/template/example_test.go
+index 9d965f1..6cf936f 100644
+--- a/src/html/template/example_test.go
++++ b/src/html/template/example_test.go
+@@ -116,9 +116,9 @@ func Example_escape() {
+ // &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
+ // &#34;Fran &amp; Freddie&#39;s Diner&#34; &lt;tasty@example.com&gt;
+ // &#34;Fran &amp; Freddie&#39;s Diner&#34;32&lt;tasty@example.com&gt;
+- // \"Fran \x26 Freddie\'s Diner\" \x3Ctasty@example.com\x3E
+- // \"Fran \x26 Freddie\'s Diner\" \x3Ctasty@example.com\x3E
+- // \"Fran \x26 Freddie\'s Diner\"32\x3Ctasty@example.com\x3E
++ // \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
++ // \"Fran \u0026 Freddie\'s Diner\" \u003Ctasty@example.com\u003E
++ // \"Fran \u0026 Freddie\'s Diner\"32\u003Ctasty@example.com\u003E
+ // %22Fran+%26+Freddie%27s+Diner%2232%3Ctasty%40example.com%3E
+
+ }
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index 0e91458..ea9c183 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -163,7 +163,6 @@ func jsValEscaper(args ...interface{}) string {
+ }
+ // TODO: detect cycles before calling Marshal which loops infinitely on
+ // cyclic data. This may be an unacceptable DoS risk.
+-
+ b, err := json.Marshal(a)
+ if err != nil {
+ // Put a space before comment so that if it is flush against
+@@ -178,8 +177,8 @@ func jsValEscaper(args ...interface{}) string {
+ // TODO: maybe post-process output to prevent it from containing
+ // "<!--", "-->", "<![CDATA[", "]]>", or "</script"
+ // in case custom marshalers produce output containing those.
+-
+- // TODO: Maybe abbreviate \u00ab to \xab to produce more compact output.
++ // Note: Do not use \x escaping to save bytes because it is not JSON compatible and this escaper
++ // supports ld+json content-type.
+ if len(b) == 0 {
+ // In, `x=y/{{.}}*z` a json.Marshaler that produces "" should
+ // not cause the output `x=y/*z`.
+@@ -260,6 +259,8 @@ func replace(s string, replacementTable []string) string {
+ r, w = utf8.DecodeRuneInString(s[i:])
+ var repl string
+ switch {
++ case int(r) < len(lowUnicodeReplacementTable):
++ repl = lowUnicodeReplacementTable[r]
+ case int(r) < len(replacementTable) && replacementTable[r] != "":
+ repl = replacementTable[r]
+ case r == '\u2028':
+@@ -283,67 +284,80 @@ func replace(s string, replacementTable []string) string {
+ return b.String()
+ }
+
++var lowUnicodeReplacementTable = []string{
++ 0: `\u0000`, 1: `\u0001`, 2: `\u0002`, 3: `\u0003`, 4: `\u0004`, 5: `\u0005`, 6: `\u0006`,
++ '\a': `\u0007`,
++ '\b': `\u0008`,
++ '\t': `\t`,
++ '\n': `\n`,
++ '\v': `\u000b`, // "\v" == "v" on IE 6.
++ '\f': `\f`,
++ '\r': `\r`,
++ 0xe: `\u000e`, 0xf: `\u000f`, 0x10: `\u0010`, 0x11: `\u0011`, 0x12: `\u0012`, 0x13: `\u0013`,
++ 0x14: `\u0014`, 0x15: `\u0015`, 0x16: `\u0016`, 0x17: `\u0017`, 0x18: `\u0018`, 0x19: `\u0019`,
++ 0x1a: `\u001a`, 0x1b: `\u001b`, 0x1c: `\u001c`, 0x1d: `\u001d`, 0x1e: `\u001e`, 0x1f: `\u001f`,
++}
++
+ var jsStrReplacementTable = []string{
+- 0: `\0`,
++ 0: `\u0000`,
+ '\t': `\t`,
+ '\n': `\n`,
+- '\v': `\x0b`, // "\v" == "v" on IE 6.
++ '\v': `\u000b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+- '"': `\x22`,
+- '&': `\x26`,
+- '\'': `\x27`,
+- '+': `\x2b`,
++ '"': `\u0022`,
++ '&': `\u0026`,
++ '\'': `\u0027`,
++ '+': `\u002b`,
+ '/': `\/`,
+- '<': `\x3c`,
+- '>': `\x3e`,
++ '<': `\u003c`,
++ '>': `\u003e`,
+ '\\': `\\`,
+ }
+
+ // jsStrNormReplacementTable is like jsStrReplacementTable but does not
+ // overencode existing escapes since this table has no entry for `\`.
+ var jsStrNormReplacementTable = []string{
+- 0: `\0`,
++ 0: `\u0000`,
+ '\t': `\t`,
+ '\n': `\n`,
+- '\v': `\x0b`, // "\v" == "v" on IE 6.
++ '\v': `\u000b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+- '"': `\x22`,
+- '&': `\x26`,
+- '\'': `\x27`,
+- '+': `\x2b`,
++ '"': `\u0022`,
++ '&': `\u0026`,
++ '\'': `\u0027`,
++ '+': `\u002b`,
+ '/': `\/`,
+- '<': `\x3c`,
+- '>': `\x3e`,
++ '<': `\u003c`,
++ '>': `\u003e`,
+ }
+-
+ var jsRegexpReplacementTable = []string{
+- 0: `\0`,
++ 0: `\u0000`,
+ '\t': `\t`,
+ '\n': `\n`,
+- '\v': `\x0b`, // "\v" == "v" on IE 6.
++ '\v': `\u000b`, // "\v" == "v" on IE 6.
+ '\f': `\f`,
+ '\r': `\r`,
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+- '"': `\x22`,
++ '"': `\u0022`,
+ '$': `\$`,
+- '&': `\x26`,
+- '\'': `\x27`,
++ '&': `\u0026`,
++ '\'': `\u0027`,
+ '(': `\(`,
+ ')': `\)`,
+ '*': `\*`,
+- '+': `\x2b`,
++ '+': `\u002b`,
+ '-': `\-`,
+ '.': `\.`,
+ '/': `\/`,
+- '<': `\x3c`,
+- '>': `\x3e`,
++ '<': `\u003c`,
++ '>': `\u003e`,
+ '?': `\?`,
+ '[': `\[`,
+ '\\': `\\`,
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index 075adaa..d7ee47b 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -137,7 +137,7 @@ func TestJSValEscaper(t *testing.T) {
+ {"foo", `"foo"`},
+ // Newlines.
+ {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
+- // "\v" == "v" on IE 6 so use "\x0b" instead.
++ // "\v" == "v" on IE 6 so use "\u000b" instead.
+ {"\t\x0b", `"\t\u000b"`},
+ {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
+ {[]interface{}{}, "[]"},
+@@ -173,7 +173,7 @@ func TestJSStrEscaper(t *testing.T) {
+ }{
+ {"", ``},
+ {"foo", `foo`},
+- {"\u0000", `\0`},
++ {"\u0000", `\u0000`},
+ {"\t", `\t`},
+ {"\n", `\n`},
+ {"\r", `\r`},
+@@ -183,14 +183,14 @@ func TestJSStrEscaper(t *testing.T) {
+ {"\\n", `\\n`},
+ {"foo\r\nbar", `foo\r\nbar`},
+ // Preserve attribute boundaries.
+- {`"`, `\x22`},
+- {`'`, `\x27`},
++ {`"`, `\u0022`},
++ {`'`, `\u0027`},
+ // Allow embedding in HTML without further escaping.
+- {`&amp;`, `\x26amp;`},
++ {`&amp;`, `\u0026amp;`},
+ // Prevent breaking out of text node and element boundaries.
+- {"</script>", `\x3c\/script\x3e`},
+- {"<![CDATA[", `\x3c![CDATA[`},
+- {"]]>", `]]\x3e`},
++ {"</script>", `\u003c\/script\u003e`},
++ {"<![CDATA[", `\u003c![CDATA[`},
++ {"]]>", `]]\u003e`},
+ // https://dev.w3.org/html5/markup/aria/syntax.html#escaping-text-span
+ // "The text in style, script, title, and textarea elements
+ // must not have an escaping text span start that is not
+@@ -201,11 +201,11 @@ func TestJSStrEscaper(t *testing.T) {
+ // allow regular text content to be interpreted as script
+ // allowing script execution via a combination of a JS string
+ // injection followed by an HTML text injection.
+- {"<!--", `\x3c!--`},
+- {"-->", `--\x3e`},
++ {"<!--", `\u003c!--`},
++ {"-->", `--\u003e`},
+ // From https://code.google.com/p/doctype/wiki/ArticleUtf7
+ {"+ADw-script+AD4-alert(1)+ADw-/script+AD4-",
+- `\x2bADw-script\x2bAD4-alert(1)\x2bADw-\/script\x2bAD4-`,
++ `\u002bADw-script\u002bAD4-alert(1)\u002bADw-\/script\u002bAD4-`,
+ },
+ // Invalid UTF-8 sequence
+ {"foo\xA0bar", "foo\xA0bar"},
+@@ -228,7 +228,7 @@ func TestJSRegexpEscaper(t *testing.T) {
+ }{
+ {"", `(?:)`},
+ {"foo", `foo`},
+- {"\u0000", `\0`},
++ {"\u0000", `\u0000`},
+ {"\t", `\t`},
+ {"\n", `\n`},
+ {"\r", `\r`},
+@@ -238,19 +238,19 @@ func TestJSRegexpEscaper(t *testing.T) {
+ {"\\n", `\\n`},
+ {"foo\r\nbar", `foo\r\nbar`},
+ // Preserve attribute boundaries.
+- {`"`, `\x22`},
+- {`'`, `\x27`},
++ {`"`, `\u0022`},
++ {`'`, `\u0027`},
+ // Allow embedding in HTML without further escaping.
+- {`&amp;`, `\x26amp;`},
++ {`&amp;`, `\u0026amp;`},
+ // Prevent breaking out of text node and element boundaries.
+- {"</script>", `\x3c\/script\x3e`},
+- {"<![CDATA[", `\x3c!\[CDATA\[`},
+- {"]]>", `\]\]\x3e`},
++ {"</script>", `\u003c\/script\u003e`},
++ {"<![CDATA[", `\u003c!\[CDATA\[`},
++ {"]]>", `\]\]\u003e`},
+ // Escaping text spans.
+- {"<!--", `\x3c!\-\-`},
+- {"-->", `\-\-\x3e`},
++ {"<!--", `\u003c!\-\-`},
++ {"-->", `\-\-\u003e`},
+ {"*", `\*`},
+- {"+", `\x2b`},
++ {"+", `\u002b`},
+ {"?", `\?`},
+ {"[](){}", `\[\]\(\)\{\}`},
+ {"$foo|x.y", `\$foo\|x\.y`},
+@@ -284,27 +284,27 @@ func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
+ {
+ "jsStrEscaper",
+ jsStrEscaper,
+- "\\0\x01\x02\x03\x04\x05\x06\x07" +
+- "\x08\\t\\n\\x0b\\f\\r\x0E\x0F" +
+- "\x10\x11\x12\x13\x14\x15\x16\x17" +
+- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+- ` !\x22#$%\x26\x27()*\x2b,-.\/` +
+- `0123456789:;\x3c=\x3e?` +
++ `\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
++ `\u0008\t\n\u000b\f\r\u000e\u000f` +
++ `\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
++ `\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
++ ` !\u0022#$%\u0026\u0027()*\u002b,-.\/` +
++ `0123456789:;\u003c=\u003e?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\\]^_` +
+ "`abcdefghijklmno" +
+- "pqrstuvwxyz{|}~\x7f" +
++ "pqrstuvwxyz{|}~\u007f" +
+ "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+ },
+ {
+ "jsRegexpEscaper",
+ jsRegexpEscaper,
+- "\\0\x01\x02\x03\x04\x05\x06\x07" +
+- "\x08\\t\\n\\x0b\\f\\r\x0E\x0F" +
+- "\x10\x11\x12\x13\x14\x15\x16\x17" +
+- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
+- ` !\x22#\$%\x26\x27\(\)\*\x2b,\-\.\/` +
+- `0123456789:;\x3c=\x3e\?` +
++ `\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007` +
++ `\u0008\t\n\u000b\f\r\u000e\u000f` +
++ `\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017` +
++ `\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f` +
++ ` !\u0022#\$%\u0026\u0027\(\)\*\u002b,\-\.\/` +
++ `0123456789:;\u003c=\u003e\?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ\[\\\]\^_` +
+ "`abcdefghijklmno" +
+diff --git a/src/html/template/template_test.go b/src/html/template/template_test.go
+index 13e6ba4..86bd4db 100644
+--- a/src/html/template/template_test.go
++++ b/src/html/template/template_test.go
+@@ -6,6 +6,7 @@ package template_test
+
+ import (
+ "bytes"
++ "encoding/json"
+ . "html/template"
+ "strings"
+ "testing"
+@@ -121,6 +122,44 @@ func TestNumbers(t *testing.T) {
+ c.mustExecute(c.root, nil, "12.34 7.5")
+ }
+
++func TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped(t *testing.T) {
++ // See #33671 and #37634 for more context on this.
++ tests := []struct{ name, in string }{
++ {"empty", ""},
++ {"invalid", string(rune(-1))},
++ {"null", "\u0000"},
++ {"unit separator", "\u001F"},
++ {"tab", "\t"},
++ {"gt and lt", "<>"},
++ {"quotes", `'"`},
++ {"ASCII letters", "ASCII letters"},
++ {"Unicode", "ʕ⊙ϖ⊙ʔ"},
++ {"Pizza", "🍕"},
++ }
++ const (
++ prefix = `<script type="application/ld+json">`
++ suffix = `</script>`
++ templ = prefix + `"{{.}}"` + suffix
++ )
++ tpl := Must(New("JS string is JSON string").Parse(templ))
++ for _, tt := range tests {
++ t.Run(tt.name, func(t *testing.T) {
++ var buf bytes.Buffer
++ if err := tpl.Execute(&buf, tt.in); err != nil {
++ t.Fatalf("Cannot render template: %v", err)
++ }
++ trimmed := bytes.TrimSuffix(bytes.TrimPrefix(buf.Bytes(), []byte(prefix)), []byte(suffix))
++ var got string
++ if err := json.Unmarshal(trimmed, &got); err != nil {
++ t.Fatalf("Cannot parse JS string %q as JSON: %v", trimmed[1:len(trimmed)-1], err)
++ }
++ if got != tt.in {
++ t.Errorf("Serialization changed the string value: got %q want %q", got, tt.in)
++ }
++ })
++ }
++}
++
+ type testCase struct {
+ t *testing.T
+ root *Template
+diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
+index 77294ed..b8a809e 100644
+--- a/src/text/template/exec_test.go
++++ b/src/text/template/exec_test.go
+@@ -911,9 +911,9 @@ func TestJSEscaping(t *testing.T) {
+ {`Go "jump" \`, `Go \"jump\" \\`},
+ {`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
+ {"unprintable \uFDFF", `unprintable \uFDFF`},
+- {`<html>`, `\x3Chtml\x3E`},
+- {`no = in attributes`, `no \x3D in attributes`},
+- {`&#x27; does not become HTML entity`, `\x26#x27; does not become HTML entity`},
++ {`<html>`, `\u003Chtml\u003E`},
++ {`no = in attributes`, `no \u003D in attributes`},
++ {`&#x27; does not become HTML entity`, `\u0026#x27; does not become HTML entity`},
+ }
+ for _, tc := range testCases {
+ s := JSEscapeString(tc.in)
+diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go
+index 46125bc..f3de9fb 100644
+--- a/src/text/template/funcs.go
++++ b/src/text/template/funcs.go
+@@ -640,10 +640,10 @@ var (
+ jsBackslash = []byte(`\\`)
+ jsApos = []byte(`\'`)
+ jsQuot = []byte(`\"`)
+- jsLt = []byte(`\x3C`)
+- jsGt = []byte(`\x3E`)
+- jsAmp = []byte(`\x26`)
+- jsEq = []byte(`\x3D`)
++ jsLt = []byte(`\u003C`)
++ jsGt = []byte(`\u003E`)
++ jsAmp = []byte(`\u0026`)
++ jsEq = []byte(`\u003D`)
+ )
+
+ // JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+--
+2.7.4
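
For reference (not part of the patch above): a minimal, self-contained sketch of the behaviour this backport restores, modelled on the new TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped test. Because the escaper now emits \u-style escapes, a string interpolated into a JSON script block round-trips through encoding/json. This assumes a toolchain that already carries the fix.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"html/template"
)

func main() {
	const (
		prefix = `<script type="application/ld+json">`
		suffix = `</script>`
	)
	tpl := template.Must(template.New("ld").Parse(prefix + `"{{.}}"` + suffix))

	var buf bytes.Buffer
	if err := tpl.Execute(&buf, `</script>'"<>`); err != nil {
		panic(err)
	}
	// \u003c, \u0022 and friends are valid JSON escapes, so the rendered
	// script body parses back with encoding/json and equals the input.
	body := bytes.TrimSuffix(bytes.TrimPrefix(buf.Bytes(), []byte(prefix)), []byte(suffix))
	var got string
	if err := json.Unmarshal(body, &got); err != nil {
		panic(err)
	}
	fmt.Println(got == `</script>'"<>`) // true
}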
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_3.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_3.patch
new file mode 100644
index 0000000000..cd7dd0957c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_3.patch
@@ -0,0 +1,393 @@
+From 7ddce23c7d5b728acf8482f5006497c7b9915f8a Mon Sep 17 00:00:00 2001
+From: Ariel Mashraki <ariel@mashraki.co.il>
+Date: Wed, 22 Apr 2020 22:17:56 +0300
+Subject: [PATCH 3/6] text/template: add CommentNode to template parse tree
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fixes #34652
+
+Change-Id: Icf6e3eda593fed826736f34f95a9d66f5450cc98
+Reviewed-on: https://go-review.googlesource.com/c/go/+/229398
+Reviewed-by: Daniel Martí <mvdan@mvdan.cc>
+Run-TryBot: Daniel Martí <mvdan@mvdan.cc>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+
+Dependency Patch #3
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/c8ea03828b0645b1fd5725888e44873b75fcfbb6
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ api/next.txt | 19 +++++++++++++++++++
+ src/html/template/escape.go | 2 ++
+ src/html/template/template_test.go | 16 ++++++++++++++++
+ src/text/template/exec.go | 1 +
+ src/text/template/parse/lex.go | 8 +++++++-
+ src/text/template/parse/lex_test.go | 7 +++++--
+ src/text/template/parse/node.go | 33 +++++++++++++++++++++++++++++++++
+ src/text/template/parse/parse.go | 22 +++++++++++++++++++---
+ src/text/template/parse/parse_test.go | 25 +++++++++++++++++++++++++
+ 9 files changed, 127 insertions(+), 6 deletions(-)
+
+diff --git a/api/next.txt b/api/next.txt
+index e69de29..076f39e 100644
+--- a/api/next.txt
++++ b/api/next.txt
+@@ -0,0 +1,19 @@
++pkg unicode, const Version = "13.0.0"
++pkg unicode, var Chorasmian *RangeTable
++pkg unicode, var Dives_Akuru *RangeTable
++pkg unicode, var Khitan_Small_Script *RangeTable
++pkg unicode, var Yezidi *RangeTable
++pkg text/template/parse, const NodeComment = 20
++pkg text/template/parse, const NodeComment NodeType
++pkg text/template/parse, const ParseComments = 1
++pkg text/template/parse, const ParseComments Mode
++pkg text/template/parse, method (*CommentNode) Copy() Node
++pkg text/template/parse, method (*CommentNode) String() string
++pkg text/template/parse, method (CommentNode) Position() Pos
++pkg text/template/parse, method (CommentNode) Type() NodeType
++pkg text/template/parse, type CommentNode struct
++pkg text/template/parse, type CommentNode struct, Text string
++pkg text/template/parse, type CommentNode struct, embedded NodeType
++pkg text/template/parse, type CommentNode struct, embedded Pos
++pkg text/template/parse, type Mode uint
++pkg text/template/parse, type Tree struct, Mode Mode
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index f12dafa..8739735 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -124,6 +124,8 @@ func (e *escaper) escape(c context, n parse.Node) context {
+ switch n := n.(type) {
+ case *parse.ActionNode:
+ return e.escapeAction(c, n)
++ case *parse.CommentNode:
++ return c
+ case *parse.IfNode:
+ return e.escapeBranch(c, &n.BranchNode, "if")
+ case *parse.ListNode:
+diff --git a/src/html/template/template_test.go b/src/html/template/template_test.go
+index 86bd4db..1f2c888 100644
+--- a/src/html/template/template_test.go
++++ b/src/html/template/template_test.go
+@@ -10,6 +10,7 @@ import (
+ . "html/template"
+ "strings"
+ "testing"
++ "text/template/parse"
+ )
+
+ func TestTemplateClone(t *testing.T) {
+@@ -160,6 +161,21 @@ func TestStringsInScriptsWithJsonContentTypeAreCorrectlyEscaped(t *testing.T) {
+ }
+ }
+
++func TestSkipEscapeComments(t *testing.T) {
++ c := newTestCase(t)
++ tr := parse.New("root")
++ tr.Mode = parse.ParseComments
++ newT, err := tr.Parse("{{/* A comment */}}{{ 1 }}{{/* Another comment */}}", "", "", make(map[string]*parse.Tree))
++ if err != nil {
++ t.Fatalf("Cannot parse template text: %v", err)
++ }
++ c.root, err = c.root.AddParseTree("root", newT)
++ if err != nil {
++ t.Fatalf("Cannot add parse tree to template: %v", err)
++ }
++ c.mustExecute(c.root, nil, "1")
++}
++
+ type testCase struct {
+ t *testing.T
+ root *Template
+diff --git a/src/text/template/exec.go b/src/text/template/exec.go
+index ac3e741..7ac5175 100644
+--- a/src/text/template/exec.go
++++ b/src/text/template/exec.go
+@@ -256,6 +256,7 @@ func (s *state) walk(dot reflect.Value, node parse.Node) {
+ if len(node.Pipe.Decl) == 0 {
+ s.printValue(node, val)
+ }
++ case *parse.CommentNode:
+ case *parse.IfNode:
+ s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
+ case *parse.ListNode:
+diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
+index 30371f2..e41373a 100644
+--- a/src/text/template/parse/lex.go
++++ b/src/text/template/parse/lex.go
+@@ -41,6 +41,7 @@ const (
+ itemBool // boolean constant
+ itemChar // printable ASCII character; grab bag for comma etc.
+ itemCharConstant // character constant
++ itemComment // comment text
+ itemComplex // complex constant (1+2i); imaginary is just a number
+ itemAssign // equals ('=') introducing an assignment
+ itemDeclare // colon-equals (':=') introducing a declaration
+@@ -112,6 +113,7 @@ type lexer struct {
+ leftDelim string // start of action
+ rightDelim string // end of action
+ trimRightDelim string // end of action with trim marker
++ emitComment bool // emit itemComment tokens.
+ pos Pos // current position in the input
+ start Pos // start position of this item
+ width Pos // width of last rune read from input
+@@ -203,7 +205,7 @@ func (l *lexer) drain() {
+ }
+
+ // lex creates a new scanner for the input string.
+-func lex(name, input, left, right string) *lexer {
++func lex(name, input, left, right string, emitComment bool) *lexer {
+ if left == "" {
+ left = leftDelim
+ }
+@@ -216,6 +218,7 @@ func lex(name, input, left, right string) *lexer {
+ leftDelim: left,
+ rightDelim: right,
+ trimRightDelim: rightTrimMarker + right,
++ emitComment: emitComment,
+ items: make(chan item),
+ line: 1,
+ startLine: 1,
+@@ -323,6 +326,9 @@ func lexComment(l *lexer) stateFn {
+ if !delim {
+ return l.errorf("comment ends before closing delimiter")
+ }
++ if l.emitComment {
++ l.emit(itemComment)
++ }
+ if trimSpace {
+ l.pos += trimMarkerLen
+ }
+diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
+index 563c4fc..f6d5f28 100644
+--- a/src/text/template/parse/lex_test.go
++++ b/src/text/template/parse/lex_test.go
+@@ -15,6 +15,7 @@ var itemName = map[itemType]string{
+ itemBool: "bool",
+ itemChar: "char",
+ itemCharConstant: "charconst",
++ itemComment: "comment",
+ itemComplex: "complex",
+ itemDeclare: ":=",
+ itemEOF: "EOF",
+@@ -90,6 +91,7 @@ var lexTests = []lexTest{
+ {"text", `now is the time`, []item{mkItem(itemText, "now is the time"), tEOF}},
+ {"text with comment", "hello-{{/* this is a comment */}}-world", []item{
+ mkItem(itemText, "hello-"),
++ mkItem(itemComment, "/* this is a comment */"),
+ mkItem(itemText, "-world"),
+ tEOF,
+ }},
+@@ -311,6 +313,7 @@ var lexTests = []lexTest{
+ }},
+ {"trimming spaces before and after comment", "hello- {{- /* hello */ -}} -world", []item{
+ mkItem(itemText, "hello-"),
++ mkItem(itemComment, "/* hello */"),
+ mkItem(itemText, "-world"),
+ tEOF,
+ }},
+@@ -389,7 +392,7 @@ var lexTests = []lexTest{
+
+ // collect gathers the emitted items into a slice.
+ func collect(t *lexTest, left, right string) (items []item) {
+- l := lex(t.name, t.input, left, right)
++ l := lex(t.name, t.input, left, right, true)
+ for {
+ item := l.nextItem()
+ items = append(items, item)
+@@ -529,7 +532,7 @@ func TestPos(t *testing.T) {
+ func TestShutdown(t *testing.T) {
+ // We need to duplicate template.Parse here to hold on to the lexer.
+ const text = "erroneous{{define}}{{else}}1234"
+- lexer := lex("foo", text, "{{", "}}")
++ lexer := lex("foo", text, "{{", "}}", false)
+ _, err := New("root").parseLexer(lexer)
+ if err == nil {
+ t.Fatalf("expected error")
+diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go
+index 1c116ea..a9dad5e 100644
+--- a/src/text/template/parse/node.go
++++ b/src/text/template/parse/node.go
+@@ -70,6 +70,7 @@ const (
+ NodeTemplate // A template invocation action.
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
++ NodeComment // A comment.
+ )
+
+ // Nodes.
+@@ -149,6 +150,38 @@ func (t *TextNode) Copy() Node {
+ return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
+ }
+
++// CommentNode holds a comment.
++type CommentNode struct {
++ NodeType
++ Pos
++ tr *Tree
++ Text string // Comment text.
++}
++
++func (t *Tree) newComment(pos Pos, text string) *CommentNode {
++ return &CommentNode{tr: t, NodeType: NodeComment, Pos: pos, Text: text}
++}
++
++func (c *CommentNode) String() string {
++ var sb strings.Builder
++ c.writeTo(&sb)
++ return sb.String()
++}
++
++func (c *CommentNode) writeTo(sb *strings.Builder) {
++ sb.WriteString("{{")
++ sb.WriteString(c.Text)
++ sb.WriteString("}}")
++}
++
++func (c *CommentNode) tree() *Tree {
++ return c.tr
++}
++
++func (c *CommentNode) Copy() Node {
++ return &CommentNode{tr: c.tr, NodeType: NodeComment, Pos: c.Pos, Text: c.Text}
++}
++
+ // PipeNode holds a pipeline with optional declaration
+ type PipeNode struct {
+ NodeType
+diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
+index c9b80f4..496d8bf 100644
+--- a/src/text/template/parse/parse.go
++++ b/src/text/template/parse/parse.go
+@@ -21,6 +21,7 @@ type Tree struct {
+ Name string // name of the template represented by the tree.
+ ParseName string // name of the top-level template during parsing, for error messages.
+ Root *ListNode // top-level root of the tree.
++ Mode Mode // parsing mode.
+ text string // text parsed to create the template (or its parent)
+ // Parsing only; cleared after parse.
+ funcs []map[string]interface{}
+@@ -29,8 +30,16 @@ type Tree struct {
+ peekCount int
+ vars []string // variables defined at the moment.
+ treeSet map[string]*Tree
++ mode Mode
+ }
+
++// A mode value is a set of flags (or 0). Modes control parser behavior.
++type Mode uint
++
++const (
++ ParseComments Mode = 1 << iota // parse comments and add them to AST
++)
++
+ // Copy returns a copy of the Tree. Any parsing state is discarded.
+ func (t *Tree) Copy() *Tree {
+ if t == nil {
+@@ -220,7 +229,8 @@ func (t *Tree) stopParse() {
+ func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
+ defer t.recover(&err)
+ t.ParseName = t.Name
+- t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim), treeSet)
++ emitComment := t.Mode&ParseComments != 0
++ t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim, emitComment), treeSet)
+ t.text = text
+ t.parse()
+ t.add()
+@@ -240,12 +250,14 @@ func (t *Tree) add() {
+ }
+ }
+
+-// IsEmptyTree reports whether this tree (node) is empty of everything but space.
++// IsEmptyTree reports whether this tree (node) is empty of everything but space or comments.
+ func IsEmptyTree(n Node) bool {
+ switch n := n.(type) {
+ case nil:
+ return true
+ case *ActionNode:
++ case *CommentNode:
++ return true
+ case *IfNode:
+ case *ListNode:
+ for _, node := range n.Nodes {
+@@ -276,6 +288,7 @@ func (t *Tree) parse() {
+ if t.nextNonSpace().typ == itemDefine {
+ newT := New("definition") // name will be updated once we know it.
+ newT.text = t.text
++ newT.Mode = t.Mode
+ newT.ParseName = t.ParseName
+ newT.startParse(t.funcs, t.lex, t.treeSet)
+ newT.parseDefinition()
+@@ -331,13 +344,15 @@ func (t *Tree) itemList() (list *ListNode, next Node) {
+ }
+
+ // textOrAction:
+-// text | action
++// text | comment | action
+ func (t *Tree) textOrAction() Node {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemText:
+ return t.newText(token.pos, token.val)
+ case itemLeftDelim:
+ return t.action()
++ case itemComment:
++ return t.newComment(token.pos, token.val)
+ default:
+ t.unexpected(token, "input")
+ }
+@@ -539,6 +554,7 @@ func (t *Tree) blockControl() Node {
+
+ block := New(name) // name will be updated once we know it.
+ block.text = t.text
++ block.Mode = t.Mode
+ block.ParseName = t.ParseName
+ block.startParse(t.funcs, t.lex, t.treeSet)
+ var end Node
+diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
+index 4e09a78..d9c13c5 100644
+--- a/src/text/template/parse/parse_test.go
++++ b/src/text/template/parse/parse_test.go
+@@ -348,6 +348,30 @@ func TestParseCopy(t *testing.T) {
+ testParse(true, t)
+ }
+
++func TestParseWithComments(t *testing.T) {
++ textFormat = "%q"
++ defer func() { textFormat = "%s" }()
++ tests := [...]parseTest{
++ {"comment", "{{/*\n\n\n*/}}", noError, "{{/*\n\n\n*/}}"},
++ {"comment trim left", "x \r\n\t{{- /* hi */}}", noError, `"x"{{/* hi */}}`},
++ {"comment trim right", "{{/* hi */ -}}\n\n\ty", noError, `{{/* hi */}}"y"`},
++ {"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x"{{/* */}}"y"`},
++ }
++ for _, test := range tests {
++ t.Run(test.name, func(t *testing.T) {
++ tr := New(test.name)
++ tr.Mode = ParseComments
++ tmpl, err := tr.Parse(test.input, "", "", make(map[string]*Tree))
++ if err != nil {
++ t.Errorf("%q: expected error; got none", test.name)
++ }
++ if result := tmpl.Root.String(); result != test.result {
++ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
++ }
++ })
++ }
++}
++
+ type isEmptyTest struct {
+ name string
+ input string
+@@ -358,6 +382,7 @@ var isEmptyTests = []isEmptyTest{
+ {"empty", ``, true},
+ {"nonempty", `hello`, false},
+ {"spaces only", " \t\n \t\n", true},
++ {"comment only", "{{/* comment */}}", true},
+ {"definition", `{{define "x"}}something{{end}}`, true},
+ {"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true},
+ {"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false},
+--
+2.7.4
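
Not part of the patch: a short sketch of the parse mode it introduces, using only the API added in the hunks above (parse.Mode, parse.ParseComments, parse.CommentNode). With the flag set, comments survive lexing and appear in the parse tree instead of being dropped; it mirrors the patch's own TestParseWithComments.

package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	tr := parse.New("root")
	tr.Mode = parse.ParseComments // keep {{/* ... */}} as CommentNode in the AST
	tree, err := tr.Parse("{{/* a comment */}}hello", "", "", make(map[string]*parse.Tree))
	if err != nil {
		panic(err)
	}
	for _, n := range tree.Root.Nodes {
		if c, ok := n.(*parse.CommentNode); ok {
			fmt.Printf("comment node: %q\n", c.Text) // "/* a comment */"
		}
	}
}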
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_4.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_4.patch
new file mode 100644
index 0000000000..d5e2eb6684
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_4.patch
@@ -0,0 +1,497 @@
+From 760d88497091fb5d6d231a18e6f4e06ecb9af9b2 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Thu, 10 Sep 2020 18:53:26 -0400
+Subject: [PATCH 4/6] text/template: allow newlines inside action delimiters
+
+This allows multiline constructs like:
+
+ {{"hello" |
+ printf}}
+
+Now that unclosed actions can span multiple lines,
+track and report the start of the action when reporting errors.
+
+Also clean up a few "unexpected <error message>" to be just "<error message>".
+
+Fixes #29770.
+
+Change-Id: I54c6c016029a8328b7902a4b6d85eab713ec3285
+Reviewed-on: https://go-review.googlesource.com/c/go/+/254257
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Rob Pike <r@golang.org>
+
+Dependency Patch #4
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/9384d34c58099657bb1b133beaf3ff37ada9b017
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/text/template/doc.go | 21 ++++-----
+ src/text/template/exec_test.go | 2 +-
+ src/text/template/parse/lex.go | 84 +++++++++++++++++------------------
+ src/text/template/parse/lex_test.go | 2 +-
+ src/text/template/parse/parse.go | 59 +++++++++++++-----------
+ src/text/template/parse/parse_test.go | 36 ++++++++++++---
+ 6 files changed, 117 insertions(+), 87 deletions(-)
+
+diff --git a/src/text/template/doc.go b/src/text/template/doc.go
+index 4b0efd2..7b30294 100644
+--- a/src/text/template/doc.go
++++ b/src/text/template/doc.go
+@@ -40,16 +40,17 @@ More intricate examples appear below.
+ Text and spaces
+
+ By default, all text between actions is copied verbatim when the template is
+-executed. For example, the string " items are made of " in the example above appears
+-on standard output when the program is run.
+-
+-However, to aid in formatting template source code, if an action's left delimiter
+-(by default "{{") is followed immediately by a minus sign and ASCII space character
+-("{{- "), all trailing white space is trimmed from the immediately preceding text.
+-Similarly, if the right delimiter ("}}") is preceded by a space and minus sign
+-(" -}}"), all leading white space is trimmed from the immediately following text.
+-In these trim markers, the ASCII space must be present; "{{-3}}" parses as an
+-action containing the number -3.
++executed. For example, the string " items are made of " in the example above
++appears on standard output when the program is run.
++
++However, to aid in formatting template source code, if an action's left
++delimiter (by default "{{") is followed immediately by a minus sign and white
++space, all trailing white space is trimmed from the immediately preceding text.
++Similarly, if the right delimiter ("}}") is preceded by white space and a minus
++sign, all leading white space is trimmed from the immediately following text.
++In these trim markers, the white space must be present:
++"{{- 3}}" is like "{{3}}" but trims the immediately preceding text, while
++"{{-3}}" parses as an action containing the number -3.
+
+ For instance, when executing the template whose source is
+
+diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
+index b8a809e..3309b33 100644
+--- a/src/text/template/exec_test.go
++++ b/src/text/template/exec_test.go
+@@ -1295,7 +1295,7 @@ func TestUnterminatedStringError(t *testing.T) {
+ t.Fatal("expected error")
+ }
+ str := err.Error()
+- if !strings.Contains(str, "X:3: unexpected unterminated raw quoted string") {
++ if !strings.Contains(str, "X:3: unterminated raw quoted string") {
+ t.Fatalf("unexpected error: %s", str)
+ }
+ }
+diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
+index e41373a..6784071 100644
+--- a/src/text/template/parse/lex.go
++++ b/src/text/template/parse/lex.go
+@@ -92,15 +92,14 @@ const eof = -1
+ // If the action begins "{{- " rather than "{{", then all space/tab/newlines
+ // preceding the action are trimmed; conversely if it ends " -}}" the
+ // leading spaces are trimmed. This is done entirely in the lexer; the
+-// parser never sees it happen. We require an ASCII space to be
+-// present to avoid ambiguity with things like "{{-3}}". It reads
++// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
++// to be present to avoid ambiguity with things like "{{-3}}". It reads
+ // better with the space present anyway. For simplicity, only ASCII
+-// space does the job.
++// does the job.
+ const (
+- spaceChars = " \t\r\n" // These are the space characters defined by Go itself.
+- leftTrimMarker = "- " // Attached to left delimiter, trims trailing spaces from preceding text.
+- rightTrimMarker = " -" // Attached to right delimiter, trims leading spaces from following text.
+- trimMarkerLen = Pos(len(leftTrimMarker))
++ spaceChars = " \t\r\n" // These are the space characters defined by Go itself.
++ trimMarker = '-' // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
++ trimMarkerLen = Pos(1 + 1) // marker plus space before or after
+ )
+
+ // stateFn represents the state of the scanner as a function that returns the next state.
+@@ -108,19 +107,18 @@ type stateFn func(*lexer) stateFn
+
+ // lexer holds the state of the scanner.
+ type lexer struct {
+- name string // the name of the input; used only for error reports
+- input string // the string being scanned
+- leftDelim string // start of action
+- rightDelim string // end of action
+- trimRightDelim string // end of action with trim marker
+- emitComment bool // emit itemComment tokens.
+- pos Pos // current position in the input
+- start Pos // start position of this item
+- width Pos // width of last rune read from input
+- items chan item // channel of scanned items
+- parenDepth int // nesting depth of ( ) exprs
+- line int // 1+number of newlines seen
+- startLine int // start line of this item
++ name string // the name of the input; used only for error reports
++ input string // the string being scanned
++ leftDelim string // start of action
++ rightDelim string // end of action
++ emitComment bool // emit itemComment tokens.
++ pos Pos // current position in the input
++ start Pos // start position of this item
++ width Pos // width of last rune read from input
++ items chan item // channel of scanned items
++ parenDepth int // nesting depth of ( ) exprs
++ line int // 1+number of newlines seen
++ startLine int // start line of this item
+ }
+
+ // next returns the next rune in the input.
+@@ -213,15 +211,14 @@ func lex(name, input, left, right string, emitComment bool) *lexer {
+ right = rightDelim
+ }
+ l := &lexer{
+- name: name,
+- input: input,
+- leftDelim: left,
+- rightDelim: right,
+- trimRightDelim: rightTrimMarker + right,
+- emitComment: emitComment,
+- items: make(chan item),
+- line: 1,
+- startLine: 1,
++ name: name,
++ input: input,
++ leftDelim: left,
++ rightDelim: right,
++ emitComment: emitComment,
++ items: make(chan item),
++ line: 1,
++ startLine: 1,
+ }
+ go l.run()
+ return l
+@@ -251,7 +248,7 @@ func lexText(l *lexer) stateFn {
+ ldn := Pos(len(l.leftDelim))
+ l.pos += Pos(x)
+ trimLength := Pos(0)
+- if strings.HasPrefix(l.input[l.pos+ldn:], leftTrimMarker) {
++ if hasLeftTrimMarker(l.input[l.pos+ldn:]) {
+ trimLength = rightTrimLength(l.input[l.start:l.pos])
+ }
+ l.pos -= trimLength
+@@ -280,7 +277,7 @@ func rightTrimLength(s string) Pos {
+
+ // atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
+ func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
+- if strings.HasPrefix(l.input[l.pos:], l.trimRightDelim) { // With trim marker.
++ if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
+ return true, true
+ }
+ if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
+@@ -297,7 +294,7 @@ func leftTrimLength(s string) Pos {
+ // lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
+ func lexLeftDelim(l *lexer) stateFn {
+ l.pos += Pos(len(l.leftDelim))
+- trimSpace := strings.HasPrefix(l.input[l.pos:], leftTrimMarker)
++ trimSpace := hasLeftTrimMarker(l.input[l.pos:])
+ afterMarker := Pos(0)
+ if trimSpace {
+ afterMarker = trimMarkerLen
+@@ -342,7 +339,7 @@ func lexComment(l *lexer) stateFn {
+
+ // lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
+ func lexRightDelim(l *lexer) stateFn {
+- trimSpace := strings.HasPrefix(l.input[l.pos:], rightTrimMarker)
++ trimSpace := hasRightTrimMarker(l.input[l.pos:])
+ if trimSpace {
+ l.pos += trimMarkerLen
+ l.ignore()
+@@ -369,7 +366,7 @@ func lexInsideAction(l *lexer) stateFn {
+ return l.errorf("unclosed left paren")
+ }
+ switch r := l.next(); {
+- case r == eof || isEndOfLine(r):
++ case r == eof:
+ return l.errorf("unclosed action")
+ case isSpace(r):
+ l.backup() // Put space back in case we have " -}}".
+@@ -439,7 +436,7 @@ func lexSpace(l *lexer) stateFn {
+ }
+ // Be careful about a trim-marked closing delimiter, which has a minus
+ // after a space. We know there is a space, so check for the '-' that might follow.
+- if strings.HasPrefix(l.input[l.pos-1:], l.trimRightDelim) {
++ if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
+ l.backup() // Before the space.
+ if numSpaces == 1 {
+ return lexRightDelim // On the delim, so go right to that.
+@@ -526,7 +523,7 @@ func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+ // day to implement arithmetic.
+ func (l *lexer) atTerminator() bool {
+ r := l.peek()
+- if isSpace(r) || isEndOfLine(r) {
++ if isSpace(r) {
+ return true
+ }
+ switch r {
+@@ -657,15 +654,18 @@ Loop:
+
+ // isSpace reports whether r is a space character.
+ func isSpace(r rune) bool {
+- return r == ' ' || r == '\t'
+-}
+-
+-// isEndOfLine reports whether r is an end-of-line character.
+-func isEndOfLine(r rune) bool {
+- return r == '\r' || r == '\n'
++ return r == ' ' || r == '\t' || r == '\r' || r == '\n'
+ }
+
+ // isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+ func isAlphaNumeric(r rune) bool {
+ return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+ }
++
++func hasLeftTrimMarker(s string) bool {
++ return len(s) >= 2 && s[0] == trimMarker && isSpace(rune(s[1]))
++}
++
++func hasRightTrimMarker(s string) bool {
++ return len(s) >= 2 && isSpace(rune(s[0])) && s[1] == trimMarker
++}
+diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
+index f6d5f28..6510eed 100644
+--- a/src/text/template/parse/lex_test.go
++++ b/src/text/template/parse/lex_test.go
+@@ -323,7 +323,7 @@ var lexTests = []lexTest{
+ tLeft,
+ mkItem(itemError, "unrecognized character in action: U+0001"),
+ }},
+- {"unclosed action", "{{\n}}", []item{
++ {"unclosed action", "{{", []item{
+ tLeft,
+ mkItem(itemError, "unclosed action"),
+ }},
+diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
+index 496d8bf..5e6e512 100644
+--- a/src/text/template/parse/parse.go
++++ b/src/text/template/parse/parse.go
+@@ -24,13 +24,14 @@ type Tree struct {
+ Mode Mode // parsing mode.
+ text string // text parsed to create the template (or its parent)
+ // Parsing only; cleared after parse.
+- funcs []map[string]interface{}
+- lex *lexer
+- token [3]item // three-token lookahead for parser.
+- peekCount int
+- vars []string // variables defined at the moment.
+- treeSet map[string]*Tree
+- mode Mode
++ funcs []map[string]interface{}
++ lex *lexer
++ token [3]item // three-token lookahead for parser.
++ peekCount int
++ vars []string // variables defined at the moment.
++ treeSet map[string]*Tree
++ actionLine int // line of left delim starting action
++ mode Mode
+ }
+
+ // A mode value is a set of flags (or 0). Modes control parser behavior.
+@@ -187,6 +188,16 @@ func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
+
+ // unexpected complains about the token and terminates processing.
+ func (t *Tree) unexpected(token item, context string) {
++ if token.typ == itemError {
++ extra := ""
++ if t.actionLine != 0 && t.actionLine != token.line {
++ extra = fmt.Sprintf(" in action started at %s:%d", t.ParseName, t.actionLine)
++ if strings.HasSuffix(token.val, " action") {
++ extra = extra[len(" in action"):] // avoid "action in action"
++ }
++ }
++ t.errorf("%s%s", token, extra)
++ }
+ t.errorf("unexpected %s in %s", token, context)
+ }
+
+@@ -350,6 +361,8 @@ func (t *Tree) textOrAction() Node {
+ case itemText:
+ return t.newText(token.pos, token.val)
+ case itemLeftDelim:
++ t.actionLine = token.line
++ defer t.clearActionLine()
+ return t.action()
+ case itemComment:
+ return t.newComment(token.pos, token.val)
+@@ -359,6 +372,10 @@ func (t *Tree) textOrAction() Node {
+ return nil
+ }
+
++func (t *Tree) clearActionLine() {
++ t.actionLine = 0
++}
++
+ // Action:
+ // control
+ // command ("|" command)*
+@@ -384,12 +401,12 @@ func (t *Tree) action() (n Node) {
+ t.backup()
+ token := t.peek()
+ // Do not pop variables; they persist until "end".
+- return t.newAction(token.pos, token.line, t.pipeline("command"))
++ return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
+ }
+
+ // Pipeline:
+ // declarations? command ('|' command)*
+-func (t *Tree) pipeline(context string) (pipe *PipeNode) {
++func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
+ token := t.peekNonSpace()
+ pipe = t.newPipeline(token.pos, token.line, nil)
+ // Are there declarations or assignments?
+@@ -430,12 +447,9 @@ decls:
+ }
+ for {
+ switch token := t.nextNonSpace(); token.typ {
+- case itemRightDelim, itemRightParen:
++ case end:
+ // At this point, the pipeline is complete
+ t.checkPipeline(pipe, context)
+- if token.typ == itemRightParen {
+- t.backup()
+- }
+ return
+ case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+ itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+@@ -464,7 +478,7 @@ func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
+
+ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+ defer t.popVars(len(t.vars))
+- pipe = t.pipeline(context)
++ pipe = t.pipeline(context, itemRightDelim)
+ var next Node
+ list, next = t.itemList()
+ switch next.Type() {
+@@ -550,7 +564,7 @@ func (t *Tree) blockControl() Node {
+
+ token := t.nextNonSpace()
+ name := t.parseTemplateName(token, context)
+- pipe := t.pipeline(context)
++ pipe := t.pipeline(context, itemRightDelim)
+
+ block := New(name) // name will be updated once we know it.
+ block.text = t.text
+@@ -580,7 +594,7 @@ func (t *Tree) templateControl() Node {
+ if t.nextNonSpace().typ != itemRightDelim {
+ t.backup()
+ // Do not pop variables; they persist until "end".
+- pipe = t.pipeline(context)
++ pipe = t.pipeline(context, itemRightDelim)
+ }
+ return t.newTemplate(token.pos, token.line, name, pipe)
+ }
+@@ -614,13 +628,12 @@ func (t *Tree) command() *CommandNode {
+ switch token := t.next(); token.typ {
+ case itemSpace:
+ continue
+- case itemError:
+- t.errorf("%s", token.val)
+ case itemRightDelim, itemRightParen:
+ t.backup()
+ case itemPipe:
++ // nothing here; break loop below
+ default:
+- t.errorf("unexpected %s in operand", token)
++ t.unexpected(token, "operand")
+ }
+ break
+ }
+@@ -675,8 +688,6 @@ func (t *Tree) operand() Node {
+ // A nil return means the next item is not a term.
+ func (t *Tree) term() Node {
+ switch token := t.nextNonSpace(); token.typ {
+- case itemError:
+- t.errorf("%s", token.val)
+ case itemIdentifier:
+ if !t.hasFunction(token.val) {
+ t.errorf("function %q not defined", token.val)
+@@ -699,11 +710,7 @@ func (t *Tree) term() Node {
+ }
+ return number
+ case itemLeftParen:
+- pipe := t.pipeline("parenthesized pipeline")
+- if token := t.next(); token.typ != itemRightParen {
+- t.errorf("unclosed right paren: unexpected %s", token)
+- }
+- return pipe
++ return t.pipeline("parenthesized pipeline", itemRightParen)
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
+index d9c13c5..220f984 100644
+--- a/src/text/template/parse/parse_test.go
++++ b/src/text/template/parse/parse_test.go
+@@ -250,6 +250,13 @@ var parseTests = []parseTest{
+ {"comment trim left and right", "x \r\n\t{{- /* */ -}}\n\n\ty", noError, `"x""y"`},
+ {"block definition", `{{block "foo" .}}hello{{end}}`, noError,
+ `{{template "foo" .}}`},
++
++ {"newline in assignment", "{{ $x \n := \n 1 \n }}", noError, "{{$x := 1}}"},
++ {"newline in empty action", "{{\n}}", hasError, "{{\n}}"},
++ {"newline in pipeline", "{{\n\"x\"\n|\nprintf\n}}", noError, `{{"x" | printf}}`},
++ {"newline in comment", "{{/*\nhello\n*/}}", noError, ""},
++ {"newline in comment", "{{-\n/*\nhello\n*/\n-}}", noError, ""},
++
+ // Errors.
+ {"unclosed action", "hello{{range", hasError, ""},
+ {"unmatched end", "{{end}}", hasError, ""},
+@@ -426,23 +433,38 @@ var errorTests = []parseTest{
+ // Check line numbers are accurate.
+ {"unclosed1",
+ "line1\n{{",
+- hasError, `unclosed1:2: unexpected unclosed action in command`},
++ hasError, `unclosed1:2: unclosed action`},
+ {"unclosed2",
+ "line1\n{{define `x`}}line2\n{{",
+- hasError, `unclosed2:3: unexpected unclosed action in command`},
++ hasError, `unclosed2:3: unclosed action`},
++ {"unclosed3",
++ "line1\n{{\"x\"\n\"y\"\n",
++ hasError, `unclosed3:4: unclosed action started at unclosed3:2`},
++ {"unclosed4",
++ "{{\n\n\n\n\n",
++ hasError, `unclosed4:6: unclosed action started at unclosed4:1`},
++ {"var1",
++ "line1\n{{\nx\n}}",
++ hasError, `var1:3: function "x" not defined`},
+ // Specific errors.
+ {"function",
+ "{{foo}}",
+ hasError, `function "foo" not defined`},
+- {"comment",
++ {"comment1",
+ "{{/*}}",
+- hasError, `unclosed comment`},
++ hasError, `comment1:1: unclosed comment`},
++ {"comment2",
++ "{{/*\nhello\n}}",
++ hasError, `comment2:1: unclosed comment`},
+ {"lparen",
+ "{{.X (1 2 3}}",
+ hasError, `unclosed left paren`},
+ {"rparen",
+- "{{.X 1 2 3)}}",
+- hasError, `unexpected ")"`},
++ "{{.X 1 2 3 ) }}",
++ hasError, `unexpected ")" in command`},
++ {"rparen2",
++ "{{(.X 1 2 3",
++ hasError, `unclosed action`},
+ {"space",
+ "{{`x`3}}",
+ hasError, `in operand`},
+@@ -488,7 +510,7 @@ var errorTests = []parseTest{
+ hasError, `missing value for parenthesized pipeline`},
+ {"multilinerawstring",
+ "{{ $v := `\n` }} {{",
+- hasError, `multilinerawstring:2: unexpected unclosed action`},
++ hasError, `multilinerawstring:2: unclosed action`},
+ {"rangeundefvar",
+ "{{range $k}}{{end}}",
+ hasError, `undefined variable`},
+--
+2.7.4
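
Not part of the patch: a sketch of what the relaxed lexer accepts, based on the commit message's own example and the new "newline in pipeline" test case. With this change an action may span several lines; previously the lexer reported "unclosed action" at the first newline.

package main

import (
	"os"
	"text/template"
)

func main() {
	// The pipeline inside the action is spread across two lines.
	const src = `{{"hello" |
	printf}}`
	tpl := template.Must(template.New("multiline").Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil { // prints "hello"
		panic(err)
	}
}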
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_5.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_5.patch
new file mode 100644
index 0000000000..fc38929648
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_5.patch
@@ -0,0 +1,585 @@
+From e0e6bca6ddc0e6d9fa3a5b644af9b446924fbf83 Mon Sep 17 00:00:00 2001
+From: Russ Cox <rsc@golang.org>
+Date: Thu, 20 May 2021 12:46:33 -0400
+Subject: [PATCH 5/6] html/template, text/template: implement break and
+ continue for range loops
+
+Break and continue for range loops was accepted as a proposal in June 2017.
+It was implemented in CL 66410 (Oct 2017)
+but then rolled back in CL 92155 (Feb 2018)
+because html/template changes had not been implemented.
+
+This CL reimplements break and continue in text/template
+and then adds support for them in html/template as well.
+
+Fixes #20531.
+
+Change-Id: I05330482a976f1c078b4b49c2287bd9031bb7616
+Reviewed-on: https://go-review.googlesource.com/c/go/+/321491
+Trust: Russ Cox <rsc@golang.org>
+Run-TryBot: Russ Cox <rsc@golang.org>
+TryBot-Result: Go Bot <gobot@golang.org>
+Reviewed-by: Rob Pike <r@golang.org>
+
+Dependency Patch #5
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/d0dd26a88c019d54f22463daae81e785f5867565
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/html/template/context.go | 4 ++
+ src/html/template/escape.go | 71 ++++++++++++++++++++++++++++++++++-
+ src/html/template/escape_test.go | 24 ++++++++++++
+ src/text/template/doc.go | 8 ++++
+ src/text/template/exec.go | 24 +++++++++++-
+ src/text/template/exec_test.go | 2 +
+ src/text/template/parse/lex.go | 13 ++++++-
+ src/text/template/parse/lex_test.go | 2 +
+ src/text/template/parse/node.go | 36 ++++++++++++++++++
+ src/text/template/parse/parse.go | 42 ++++++++++++++++++++-
+ src/text/template/parse/parse_test.go | 8 ++++
+ 11 files changed, 230 insertions(+), 4 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index f7d4849..aaa7d08 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -6,6 +6,7 @@ package template
+
+ import (
+ "fmt"
++ "text/template/parse"
+ )
+
+ // context describes the state an HTML parser must be in when it reaches the
+@@ -22,6 +23,7 @@ type context struct {
+ jsCtx jsCtx
+ attr attr
+ element element
++ n parse.Node // for range break/continue
+ err *Error
+ }
+
+@@ -141,6 +143,8 @@ const (
+ // stateError is an infectious error state outside any valid
+ // HTML/CSS/JS construct.
+ stateError
++ // stateDead marks unreachable code after a {{break}} or {{continue}}.
++ stateDead
+ )
+
+ // isComment is true for any state that contains content meant for template
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 8739735..6dea79c 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -97,6 +97,15 @@ type escaper struct {
+ actionNodeEdits map[*parse.ActionNode][]string
+ templateNodeEdits map[*parse.TemplateNode]string
+ textNodeEdits map[*parse.TextNode][]byte
++ // rangeContext holds context about the current range loop.
++ rangeContext *rangeContext
++}
++
++// rangeContext holds information about the current range loop.
++type rangeContext struct {
++ outer *rangeContext // outer loop
++ breaks []context // context at each break action
++ continues []context // context at each continue action
+ }
+
+ // makeEscaper creates a blank escaper for the given set.
+@@ -109,6 +118,7 @@ func makeEscaper(n *nameSpace) escaper {
+ map[*parse.ActionNode][]string{},
+ map[*parse.TemplateNode]string{},
+ map[*parse.TextNode][]byte{},
++ nil,
+ }
+ }
+
+@@ -124,8 +134,16 @@ func (e *escaper) escape(c context, n parse.Node) context {
+ switch n := n.(type) {
+ case *parse.ActionNode:
+ return e.escapeAction(c, n)
++ case *parse.BreakNode:
++ c.n = n
++ e.rangeContext.breaks = append(e.rangeContext.breaks, c)
++ return context{state: stateDead}
+ case *parse.CommentNode:
+ return c
++ case *parse.ContinueNode:
++ c.n = n
++ e.rangeContext.continues = append(e.rangeContext.breaks, c)
++ return context{state: stateDead}
+ case *parse.IfNode:
+ return e.escapeBranch(c, &n.BranchNode, "if")
+ case *parse.ListNode:
+@@ -427,6 +445,12 @@ func join(a, b context, node parse.Node, nodeName string) context {
+ if b.state == stateError {
+ return b
+ }
++ if a.state == stateDead {
++ return b
++ }
++ if b.state == stateDead {
++ return a
++ }
+ if a.eq(b) {
+ return a
+ }
+@@ -466,14 +490,27 @@ func join(a, b context, node parse.Node, nodeName string) context {
+
+ // escapeBranch escapes a branch template node: "if", "range" and "with".
+ func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string) context {
++ if nodeName == "range" {
++ e.rangeContext = &rangeContext{outer: e.rangeContext}
++ }
+ c0 := e.escapeList(c, n.List)
+- if nodeName == "range" && c0.state != stateError {
++ if nodeName == "range" {
++ if c0.state != stateError {
++ c0 = joinRange(c0, e.rangeContext)
++ }
++ e.rangeContext = e.rangeContext.outer
++ if c0.state == stateError {
++ return c0
++ }
++
+ // The "true" branch of a "range" node can execute multiple times.
+ // We check that executing n.List once results in the same context
+ // as executing n.List twice.
++ e.rangeContext = &rangeContext{outer: e.rangeContext}
+ c1, _ := e.escapeListConditionally(c0, n.List, nil)
+ c0 = join(c0, c1, n, nodeName)
+ if c0.state == stateError {
++ e.rangeContext = e.rangeContext.outer
+ // Make clear that this is a problem on loop re-entry
+ // since developers tend to overlook that branch when
+ // debugging templates.
+@@ -481,11 +518,39 @@ func (e *escaper) escapeBranch(c context, n *parse.BranchNode, nodeName string)
+ c0.err.Description = "on range loop re-entry: " + c0.err.Description
+ return c0
+ }
++ c0 = joinRange(c0, e.rangeContext)
++ e.rangeContext = e.rangeContext.outer
++ if c0.state == stateError {
++ return c0
++ }
+ }
+ c1 := e.escapeList(c, n.ElseList)
+ return join(c0, c1, n, nodeName)
+ }
+
++func joinRange(c0 context, rc *rangeContext) context {
++ // Merge contexts at break and continue statements into overall body context.
++ // In theory we could treat breaks differently from continues, but for now it is
++ // enough to treat them both as going back to the start of the loop (which may then stop).
++ for _, c := range rc.breaks {
++ c0 = join(c0, c, c.n, "range")
++ if c0.state == stateError {
++ c0.err.Line = c.n.(*parse.BreakNode).Line
++ c0.err.Description = "at range loop break: " + c0.err.Description
++ return c0
++ }
++ }
++ for _, c := range rc.continues {
++ c0 = join(c0, c, c.n, "range")
++ if c0.state == stateError {
++ c0.err.Line = c.n.(*parse.ContinueNode).Line
++ c0.err.Description = "at range loop continue: " + c0.err.Description
++ return c0
++ }
++ }
++ return c0
++}
++
+ // escapeList escapes a list template node.
+ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ if n == nil {
+@@ -493,6 +558,9 @@ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ }
+ for _, m := range n.Nodes {
+ c = e.escape(c, m)
++ if c.state == stateDead {
++ break
++ }
+ }
+ return c
+ }
+@@ -503,6 +571,7 @@ func (e *escaper) escapeList(c context, n *parse.ListNode) context {
+ // which is the same as whether e was updated.
+ func (e *escaper) escapeListConditionally(c context, n *parse.ListNode, filter func(*escaper, context) bool) (context, bool) {
+ e1 := makeEscaper(e.ns)
++ e1.rangeContext = e.rangeContext
+ // Make type inferences available to f.
+ for k, v := range e.output {
+ e1.output[k] = v
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index c709660..fa2b84a 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -920,6 +920,22 @@ func TestErrors(t *testing.T) {
+ "<a href='/foo?{{range .Items}}&{{.K}}={{.V}}{{end}}'>",
+ "",
+ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{continue}}{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{break}}{{end}}",
++ "",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
++ "",
++ },
+ // Error cases.
+ {
+ "{{if .Cond}}<a{{end}}",
+@@ -956,6 +972,14 @@ func TestErrors(t *testing.T) {
+ "z:2:8: on range loop re-entry: {{range}} branches",
+ },
+ {
++ "{{range .Items}}<a{{if .X}}{{break}}{{end}}>{{end}}",
++ "z:1:29: at range loop break: {{range}} branches end in different contexts",
++ },
++ {
++ "{{range .Items}}<a{{if .X}}{{continue}}{{end}}>{{end}}",
++ "z:1:29: at range loop continue: {{range}} branches end in different contexts",
++ },
++ {
+ "<a b=1 c={{.H}}",
+ "z: ends in a non-text context: {stateAttr delimSpaceOrTagEnd",
+ },
+diff --git a/src/text/template/doc.go b/src/text/template/doc.go
+index 7b30294..0228b15 100644
+--- a/src/text/template/doc.go
++++ b/src/text/template/doc.go
+@@ -112,6 +112,14 @@ data, defined in detail in the corresponding sections that follow.
+ T0 is executed; otherwise, dot is set to the successive elements
+ of the array, slice, or map and T1 is executed.
+
++ {{break}}
++ The innermost {{range pipeline}} loop is ended early, stopping the
++ current iteration and bypassing all remaining iterations.
++
++ {{continue}}
++ The current iteration of the innermost {{range pipeline}} loop is
++ stopped, and the loop starts the next iteration.
++
+ {{template "name"}}
+ The template with the specified name is executed with nil data.
+
+diff --git a/src/text/template/exec.go b/src/text/template/exec.go
+index 7ac5175..6cb140a 100644
+--- a/src/text/template/exec.go
++++ b/src/text/template/exec.go
+@@ -5,6 +5,7 @@
+ package template
+
+ import (
++ "errors"
+ "fmt"
+ "internal/fmtsort"
+ "io"
+@@ -244,6 +245,12 @@ func (t *Template) DefinedTemplates() string {
+ return b.String()
+ }
+
++// Sentinel errors for use with panic to signal early exits from range loops.
++var (
++ walkBreak = errors.New("break")
++ walkContinue = errors.New("continue")
++)
++
+ // Walk functions step through the major pieces of the template structure,
+ // generating output as they go.
+ func (s *state) walk(dot reflect.Value, node parse.Node) {
+@@ -256,7 +263,11 @@ func (s *state) walk(dot reflect.Value, node parse.Node) {
+ if len(node.Pipe.Decl) == 0 {
+ s.printValue(node, val)
+ }
++ case *parse.BreakNode:
++ panic(walkBreak)
+ case *parse.CommentNode:
++ case *parse.ContinueNode:
++ panic(walkContinue)
+ case *parse.IfNode:
+ s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
+ case *parse.ListNode:
+@@ -335,6 +346,11 @@ func isTrue(val reflect.Value) (truth, ok bool) {
+
+ func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ s.at(r)
++ defer func() {
++ if r := recover(); r != nil && r != walkBreak {
++ panic(r)
++ }
++ }()
+ defer s.pop(s.mark())
+ val, _ := indirect(s.evalPipeline(dot, r.Pipe))
+ // mark top of stack before any variables in the body are pushed.
+@@ -348,8 +364,14 @@ func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ if len(r.Pipe.Decl) > 1 {
+ s.setTopVar(2, index)
+ }
++ defer s.pop(mark)
++ defer func() {
++ // Consume panic(walkContinue)
++ if r := recover(); r != nil && r != walkContinue {
++ panic(r)
++ }
++ }()
+ s.walk(elem, r.List)
+- s.pop(mark)
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
+index 3309b33..a639f44 100644
+--- a/src/text/template/exec_test.go
++++ b/src/text/template/exec_test.go
+@@ -563,6 +563,8 @@ var execTests = []execTest{
+ {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
++ {"range []int break else", "{{range .SI}}-{{.}}-{{break}}NOTREACHED{{else}}EMPTY{{end}}", "-3-", tVal, true},
++ {"range []int continue else", "{{range .SI}}-{{.}}-{{continue}}NOTREACHED{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+ {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+ {"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
+diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
+index 6784071..95e3377 100644
+--- a/src/text/template/parse/lex.go
++++ b/src/text/template/parse/lex.go
+@@ -62,6 +62,8 @@ const (
+ // Keywords appear after all the rest.
+ itemKeyword // used only to delimit the keywords
+ itemBlock // block keyword
++ itemBreak // break keyword
++ itemContinue // continue keyword
+ itemDot // the cursor, spelled '.'
+ itemDefine // define keyword
+ itemElse // else keyword
+@@ -76,6 +78,8 @@ const (
+ var key = map[string]itemType{
+ ".": itemDot,
+ "block": itemBlock,
++ "break": itemBreak,
++ "continue": itemContinue,
+ "define": itemDefine,
+ "else": itemElse,
+ "end": itemEnd,
+@@ -119,6 +123,8 @@ type lexer struct {
+ parenDepth int // nesting depth of ( ) exprs
+ line int // 1+number of newlines seen
+ startLine int // start line of this item
++ breakOK bool // break keyword allowed
++ continueOK bool // continue keyword allowed
+ }
+
+ // next returns the next rune in the input.
+@@ -461,7 +467,12 @@ Loop:
+ }
+ switch {
+ case key[word] > itemKeyword:
+- l.emit(key[word])
++ item := key[word]
++ if item == itemBreak && !l.breakOK || item == itemContinue && !l.continueOK {
++ l.emit(itemIdentifier)
++ } else {
++ l.emit(item)
++ }
+ case word[0] == '.':
+ l.emit(itemField)
+ case word == "true", word == "false":
+diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
+index 6510eed..df6aabf 100644
+--- a/src/text/template/parse/lex_test.go
++++ b/src/text/template/parse/lex_test.go
+@@ -35,6 +35,8 @@ var itemName = map[itemType]string{
+ // keywords
+ itemDot: ".",
+ itemBlock: "block",
++ itemBreak: "break",
++ itemContinue: "continue",
+ itemDefine: "define",
+ itemElse: "else",
+ itemIf: "if",
+diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go
+index a9dad5e..c398da0 100644
+--- a/src/text/template/parse/node.go
++++ b/src/text/template/parse/node.go
+@@ -71,6 +71,8 @@ const (
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
+ NodeComment // A comment.
++ NodeBreak // A break action.
++ NodeContinue // A continue action.
+ )
+
+ // Nodes.
+@@ -907,6 +909,40 @@ func (i *IfNode) Copy() Node {
+ return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
+ }
+
++// BreakNode represents a {{break}} action.
++type BreakNode struct {
++ tr *Tree
++ NodeType
++ Pos
++ Line int
++}
++
++func (t *Tree) newBreak(pos Pos, line int) *BreakNode {
++ return &BreakNode{tr: t, NodeType: NodeBreak, Pos: pos, Line: line}
++}
++
++func (b *BreakNode) Copy() Node { return b.tr.newBreak(b.Pos, b.Line) }
++func (b *BreakNode) String() string { return "{{break}}" }
++func (b *BreakNode) tree() *Tree { return b.tr }
++func (b *BreakNode) writeTo(sb *strings.Builder) { sb.WriteString("{{break}}") }
++
++// ContinueNode represents a {{continue}} action.
++type ContinueNode struct {
++ tr *Tree
++ NodeType
++ Pos
++ Line int
++}
++
++func (t *Tree) newContinue(pos Pos, line int) *ContinueNode {
++ return &ContinueNode{tr: t, NodeType: NodeContinue, Pos: pos, Line: line}
++}
++
++func (c *ContinueNode) Copy() Node { return c.tr.newContinue(c.Pos, c.Line) }
++func (c *ContinueNode) String() string { return "{{continue}}" }
++func (c *ContinueNode) tree() *Tree { return c.tr }
++func (c *ContinueNode) writeTo(sb *strings.Builder) { sb.WriteString("{{continue}}") }
++
+ // RangeNode represents a {{range}} action and its commands.
+ type RangeNode struct {
+ BranchNode
+diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
+index 5e6e512..7f78b56 100644
+--- a/src/text/template/parse/parse.go
++++ b/src/text/template/parse/parse.go
+@@ -31,6 +31,7 @@ type Tree struct {
+ vars []string // variables defined at the moment.
+ treeSet map[string]*Tree
+ actionLine int // line of left delim starting action
++ rangeDepth int
+ mode Mode
+ }
+
+@@ -223,6 +224,8 @@ func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer, treeSet ma
+ t.vars = []string{"$"}
+ t.funcs = funcs
+ t.treeSet = treeSet
++ lex.breakOK = !t.hasFunction("break")
++ lex.continueOK = !t.hasFunction("continue")
+ }
+
+ // stopParse terminates parsing.
+@@ -385,6 +388,10 @@ func (t *Tree) action() (n Node) {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemBlock:
+ return t.blockControl()
++ case itemBreak:
++ return t.breakControl(token.pos, token.line)
++ case itemContinue:
++ return t.continueControl(token.pos, token.line)
+ case itemElse:
+ return t.elseControl()
+ case itemEnd:
+@@ -404,6 +411,32 @@ func (t *Tree) action() (n Node) {
+ return t.newAction(token.pos, token.line, t.pipeline("command", itemRightDelim))
+ }
+
++// Break:
++// {{break}}
++// Break keyword is past.
++func (t *Tree) breakControl(pos Pos, line int) Node {
++ if token := t.next(); token.typ != itemRightDelim {
++ t.unexpected(token, "in {{break}}")
++ }
++ if t.rangeDepth == 0 {
++ t.errorf("{{break}} outside {{range}}")
++ }
++ return t.newBreak(pos, line)
++}
++
++// Continue:
++// {{continue}}
++// Continue keyword is past.
++func (t *Tree) continueControl(pos Pos, line int) Node {
++ if token := t.next(); token.typ != itemRightDelim {
++ t.unexpected(token, "in {{continue}}")
++ }
++ if t.rangeDepth == 0 {
++ t.errorf("{{continue}} outside {{range}}")
++ }
++ return t.newContinue(pos, line)
++}
++
+ // Pipeline:
+ // declarations? command ('|' command)*
+ func (t *Tree) pipeline(context string, end itemType) (pipe *PipeNode) {
+@@ -479,8 +512,14 @@ func (t *Tree) checkPipeline(pipe *PipeNode, context string) {
+ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+ defer t.popVars(len(t.vars))
+ pipe = t.pipeline(context, itemRightDelim)
++ if context == "range" {
++ t.rangeDepth++
++ }
+ var next Node
+ list, next = t.itemList()
++ if context == "range" {
++ t.rangeDepth--
++ }
+ switch next.Type() {
+ case nodeEnd: //done
+ case nodeElse:
+@@ -522,7 +561,8 @@ func (t *Tree) ifControl() Node {
+ // {{range pipeline}} itemList {{else}} itemList {{end}}
+ // Range keyword is past.
+ func (t *Tree) rangeControl() Node {
+- return t.newRange(t.parseControl(false, "range"))
++ r := t.newRange(t.parseControl(false, "range"))
++ return r
+ }
+
+ // With:
+diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
+index 220f984..ba45636 100644
+--- a/src/text/template/parse/parse_test.go
++++ b/src/text/template/parse/parse_test.go
+@@ -230,6 +230,10 @@ var parseTests = []parseTest{
+ `{{range $x := .SI}}{{.}}{{end}}`},
+ {"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError,
+ `{{range $x, $y := .SI}}{{.}}{{end}}`},
++ {"range with break", "{{range .SI}}{{.}}{{break}}{{end}}", noError,
++ `{{range .SI}}{{.}}{{break}}{{end}}`},
++ {"range with continue", "{{range .SI}}{{.}}{{continue}}{{end}}", noError,
++ `{{range .SI}}{{.}}{{continue}}{{end}}`},
+ {"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError,
+ `{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`},
+ {"template", "{{template `x`}}", noError,
+@@ -279,6 +283,10 @@ var parseTests = []parseTest{
+ {"adjacent args", "{{printf 3`x`}}", hasError, ""},
+ {"adjacent args with .", "{{printf `x`.}}", hasError, ""},
+ {"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""},
++ {"break outside range", "{{range .}}{{end}} {{break}}", hasError, ""},
++ {"continue outside range", "{{range .}}{{end}} {{continue}}", hasError, ""},
++ {"break in range else", "{{range .}}{{else}}{{break}}{{end}}", hasError, ""},
++ {"continue in range else", "{{range .}}{{else}}{{continue}}{{end}}", hasError, ""},
+ // Other kinds of assignments and operators aren't available yet.
+ {"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"},
+ {"bug0b", "{{$x += 1}}{{$x}}", hasError, ""},
+--
+2.7.4
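
A minimal sketch (not part of the patch) of the syntax this parser change accepts; it assumes the rest of the backport series, which wires {{break}} and {{continue}} into template execution, is applied as well. The template text and data below are illustrative assumptions:

    package main

    import (
        "os"
        "text/template"
    )

    func main() {
        // {{break}} ends the {{range}} loop early; {{continue}} would skip
        // to the next element instead.
        const src = "{{range .}}{{if eq . 3}}{{break}}{{end}}{{.}} {{end}}\n"
        t := template.Must(template.New("demo").Parse(src))
        if err := t.Execute(os.Stdout, []int{1, 2, 3, 4}); err != nil {
            // With only the parser patch applied, execution may still reject
            // the new node types.
            panic(err)
        }
    }

As the parse tests above show, {{break}} and {{continue}} are only legal inside a {{range}} body; using them elsewhere, including in a {{range}} {{else}} branch, is a parse error.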
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_6.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_6.patch
new file mode 100644
index 0000000000..baf400b891
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24538_6.patch
@@ -0,0 +1,371 @@
+From 16f4882984569f179d73967c9eee679bb9b098c5 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Mon, 20 Mar 2023 11:01:13 -0700
+Subject: [PATCH 6/6] html/template: disallow actions in JS template literals
+
+ECMAScript 6 introduced template literals[0][1] which are delimited with
+backticks. These need to be escaped in a similar fashion to the
+delimiters for other string literals. Additionally template literals can
+contain special syntax for string interpolation.
+
+There is no clear way to allow safe insertion of actions within JS
+template literals, as handling (JS) string interpolation inside of these
+literals is rather complex. As such we've chosen to simply disallow
+template actions within these template literals.
+
+A new error code is added for this parsing failure case, errJsTmplLit,
+but it is unexported as it is not backwards compatible with other minor
+release versions to introduce an API change in a minor release. We will
+export this code in the next major release.
+
+The previous behavior (with the caveat that backticks are now escaped
+properly) can be re-enabled with GODEBUG=jstmpllitinterp=1.
+
+This change subsumes CL471455.
+
+Thanks to Sohom Datta, Manipal Institute of Technology, for reporting
+this issue.
+
+Fixes CVE-2023-24538
+For #59234
+Fixes #59271
+
+[0] https://tc39.es/ecma262/multipage/ecmascript-language-expressions.html#sec-template-literals
+[1] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Template_literals
+
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802457
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1802612
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Change-Id: Ic7f10595615f2b2740d9c85ad7ef40dc0e78c04c
+Reviewed-on: https://go-review.googlesource.com/c/go/+/481987
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+
+Upstream-Status: Backport from https://github.com/golang/go/commit/b1e3ecfa06b67014429a197ec5e134ce4303ad9b
+CVE: CVE-2023-24538
+Signed-off-by: Shubham Kulkarni <skulkarni@mvista.com>
+---
+ src/html/template/context.go | 2 ++
+ src/html/template/error.go | 13 ++++++++
+ src/html/template/escape.go | 11 +++++++
+ src/html/template/escape_test.go | 66 ++++++++++++++++++++++-----------------
+ src/html/template/js.go | 2 ++
+ src/html/template/js_test.go | 2 +-
+ src/html/template/jsctx_string.go | 9 ++++++
+ src/html/template/state_string.go | 37 ++++++++++++++++++++--
+ src/html/template/transition.go | 7 ++++-
+ 9 files changed, 116 insertions(+), 33 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index f7d4849..0b65313 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -116,6 +116,8 @@ const (
+ stateJSDqStr
+ // stateJSSqStr occurs inside a JavaScript single quoted string.
+ stateJSSqStr
++ // stateJSBqStr occurs inside a JavaScript back quoted string.
++ stateJSBqStr
+ // stateJSRegexp occurs inside a JavaScript regexp literal.
+ stateJSRegexp
+ // stateJSBlockCmt occurs inside a JavaScript /* block comment */.
+diff --git a/src/html/template/error.go b/src/html/template/error.go
+index 0e52706..fd26b64 100644
+--- a/src/html/template/error.go
++++ b/src/html/template/error.go
+@@ -211,6 +211,19 @@ const (
+ // pipeline occurs in an unquoted attribute value context, "html" is
+ // disallowed. Avoid using "html" and "urlquery" entirely in new templates.
+ ErrPredefinedEscaper
++
++ // errJSTmplLit: "... appears in a JS template literal"
++ // Example:
++ // <script>var tmpl = `{{.Interp}`</script>
++ // Discussion:
++ // Package html/template does not support actions inside of JS template
++ // literals.
++ //
++ // TODO(rolandshoemaker): we cannot add this as an exported error in a minor
++ // release, since it is backwards incompatible with the other minor
++ // releases. As such we need to leave it unexported, and then we'll add it
++ // in the next major release.
++ errJSTmplLit
+ )
+
+ func (e *Error) Error() string {
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index f12dafa..29ca5b3 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -8,6 +8,7 @@ import (
+ "bytes"
+ "fmt"
+ "html"
++ "internal/godebug"
+ "io"
+ "text/template"
+ "text/template/parse"
+@@ -203,6 +204,16 @@ func (e *escaper) escapeAction(c context, n *parse.ActionNode) context {
+ c.jsCtx = jsCtxDivOp
+ case stateJSDqStr, stateJSSqStr:
+ s = append(s, "_html_template_jsstrescaper")
++ case stateJSBqStr:
++ debugAllowActionJSTmpl := godebug.Get("jstmpllitinterp")
++ if debugAllowActionJSTmpl == "1" {
++ s = append(s, "_html_template_jsstrescaper")
++ } else {
++ return context{
++ state: stateError,
++ err: errorf(errJSTmplLit, n, n.Line, "%s appears in a JS template literal", n),
++ }
++ }
+ case stateJSRegexp:
+ s = append(s, "_html_template_jsregexpescaper")
+ case stateCSS:
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index fa2b84a..1b150e9 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -681,35 +681,31 @@ func TestEscape(t *testing.T) {
+ }
+
+ for _, test := range tests {
+- tmpl := New(test.name)
+- tmpl = Must(tmpl.Parse(test.input))
+- // Check for bug 6459: Tree field was not set in Parse.
+- if tmpl.Tree != tmpl.text.Tree {
+- t.Errorf("%s: tree not set properly", test.name)
+- continue
+- }
+- b := new(bytes.Buffer)
+- if err := tmpl.Execute(b, data); err != nil {
+- t.Errorf("%s: template execution failed: %s", test.name, err)
+- continue
+- }
+- if w, g := test.output, b.String(); w != g {
+- t.Errorf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
+- continue
+- }
+- b.Reset()
+- if err := tmpl.Execute(b, pdata); err != nil {
+- t.Errorf("%s: template execution failed for pointer: %s", test.name, err)
+- continue
+- }
+- if w, g := test.output, b.String(); w != g {
+- t.Errorf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
+- continue
+- }
+- if tmpl.Tree != tmpl.text.Tree {
+- t.Errorf("%s: tree mismatch", test.name)
+- continue
+- }
++ t.Run(test.name, func(t *testing.T) {
++ tmpl := New(test.name)
++ tmpl = Must(tmpl.Parse(test.input))
++ // Check for bug 6459: Tree field was not set in Parse.
++ if tmpl.Tree != tmpl.text.Tree {
++ t.Fatalf("%s: tree not set properly", test.name)
++ }
++ b := new(strings.Builder)
++ if err := tmpl.Execute(b, data); err != nil {
++ t.Fatalf("%s: template execution failed: %s", test.name, err)
++ }
++ if w, g := test.output, b.String(); w != g {
++ t.Fatalf("%s: escaped output: want\n\t%q\ngot\n\t%q", test.name, w, g)
++ }
++ b.Reset()
++ if err := tmpl.Execute(b, pdata); err != nil {
++ t.Fatalf("%s: template execution failed for pointer: %s", test.name, err)
++ }
++ if w, g := test.output, b.String(); w != g {
++ t.Fatalf("%s: escaped output for pointer: want\n\t%q\ngot\n\t%q", test.name, w, g)
++ }
++ if tmpl.Tree != tmpl.text.Tree {
++ t.Fatalf("%s: tree mismatch", test.name)
++ }
++ })
+ }
+ }
+
+@@ -936,6 +932,10 @@ func TestErrors(t *testing.T) {
+ "{{range .Items}}<a{{if .X}}{{end}}>{{if .X}}{{break}}{{end}}{{end}}",
+ "",
+ },
++ {
++ "<script>var a = `${a+b}`</script>`",
++ "",
++ },
+ // Error cases.
+ {
+ "{{if .Cond}}<a{{end}}",
+@@ -1082,6 +1082,10 @@ func TestErrors(t *testing.T) {
+ // html is allowed since it is the last command in the pipeline, but urlquery is not.
+ `predefined escaper "urlquery" disallowed in template`,
+ },
++ {
++ "<script>var tmpl = `asd {{.}}`;</script>",
++ `{{.}} appears in a JS template literal`,
++ },
+ }
+ for _, test := range tests {
+ buf := new(bytes.Buffer)
+@@ -1304,6 +1308,10 @@ func TestEscapeText(t *testing.T) {
+ context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+ },
+ {
++ "<a onclick=\"`foo",
++ context{state: stateJSBqStr, delim: delimDoubleQuote, attr: attrScript},
++ },
++ {
+ `<A ONCLICK="'`,
+ context{state: stateJSSqStr, delim: delimDoubleQuote, attr: attrScript},
+ },
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index ea9c183..b888eaf 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -308,6 +308,7 @@ var jsStrReplacementTable = []string{
+ // Encode HTML specials as hex so the output can be embedded
+ // in HTML attributes without further encoding.
+ '"': `\u0022`,
++ '`': `\u0060`,
+ '&': `\u0026`,
+ '\'': `\u0027`,
+ '+': `\u002b`,
+@@ -331,6 +332,7 @@ var jsStrNormReplacementTable = []string{
+ '"': `\u0022`,
+ '&': `\u0026`,
+ '\'': `\u0027`,
++ '`': `\u0060`,
+ '+': `\u002b`,
+ '/': `\/`,
+ '<': `\u003c`,
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index d7ee47b..7d963ae 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -292,7 +292,7 @@ func TestEscapersOnLower7AndSelectHighCodepoints(t *testing.T) {
+ `0123456789:;\u003c=\u003e?` +
+ `@ABCDEFGHIJKLMNO` +
+ `PQRSTUVWXYZ[\\]^_` +
+- "`abcdefghijklmno" +
++ "\\u0060abcdefghijklmno" +
+ "pqrstuvwxyz{|}~\u007f" +
+ "\u00A0\u0100\\u2028\\u2029\ufeff\U0001D11E",
+ },
+diff --git a/src/html/template/jsctx_string.go b/src/html/template/jsctx_string.go
+index dd1d87e..2394893 100644
+--- a/src/html/template/jsctx_string.go
++++ b/src/html/template/jsctx_string.go
+@@ -4,6 +4,15 @@ package template
+
+ import "strconv"
+
++func _() {
++ // An "invalid array index" compiler error signifies that the constant values have changed.
++ // Re-run the stringer command to generate them again.
++ var x [1]struct{}
++ _ = x[jsCtxRegexp-0]
++ _ = x[jsCtxDivOp-1]
++ _ = x[jsCtxUnknown-2]
++}
++
+ const _jsCtx_name = "jsCtxRegexpjsCtxDivOpjsCtxUnknown"
+
+ var _jsCtx_index = [...]uint8{0, 11, 21, 33}
+diff --git a/src/html/template/state_string.go b/src/html/template/state_string.go
+index 05104be..6fb1a6e 100644
+--- a/src/html/template/state_string.go
++++ b/src/html/template/state_string.go
+@@ -4,9 +4,42 @@ package template
+
+ import "strconv"
+
+-const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateError"
++func _() {
++ // An "invalid array index" compiler error signifies that the constant values have changed.
++ // Re-run the stringer command to generate them again.
++ var x [1]struct{}
++ _ = x[stateText-0]
++ _ = x[stateTag-1]
++ _ = x[stateAttrName-2]
++ _ = x[stateAfterName-3]
++ _ = x[stateBeforeValue-4]
++ _ = x[stateHTMLCmt-5]
++ _ = x[stateRCDATA-6]
++ _ = x[stateAttr-7]
++ _ = x[stateURL-8]
++ _ = x[stateSrcset-9]
++ _ = x[stateJS-10]
++ _ = x[stateJSDqStr-11]
++ _ = x[stateJSSqStr-12]
++ _ = x[stateJSBqStr-13]
++ _ = x[stateJSRegexp-14]
++ _ = x[stateJSBlockCmt-15]
++ _ = x[stateJSLineCmt-16]
++ _ = x[stateCSS-17]
++ _ = x[stateCSSDqStr-18]
++ _ = x[stateCSSSqStr-19]
++ _ = x[stateCSSDqURL-20]
++ _ = x[stateCSSSqURL-21]
++ _ = x[stateCSSURL-22]
++ _ = x[stateCSSBlockCmt-23]
++ _ = x[stateCSSLineCmt-24]
++ _ = x[stateError-25]
++ _ = x[stateDead-26]
++}
++
++const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
+
+-var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 155, 170, 184, 192, 205, 218, 231, 244, 255, 271, 286, 296}
++var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 204, 217, 230, 243, 256, 267, 283, 298, 308, 317}
+
+ func (i state) String() string {
+ if i >= state(len(_state_index)-1) {
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index 06df679..92eb351 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -27,6 +27,7 @@ var transitionFunc = [...]func(context, []byte) (context, int){
+ stateJS: tJS,
+ stateJSDqStr: tJSDelimited,
+ stateJSSqStr: tJSDelimited,
++ stateJSBqStr: tJSDelimited,
+ stateJSRegexp: tJSDelimited,
+ stateJSBlockCmt: tBlockCmt,
+ stateJSLineCmt: tLineCmt,
+@@ -262,7 +263,7 @@ func tURL(c context, s []byte) (context, int) {
+
+ // tJS is the context transition function for the JS state.
+ func tJS(c context, s []byte) (context, int) {
+- i := bytes.IndexAny(s, `"'/`)
++ i := bytes.IndexAny(s, "\"`'/")
+ if i == -1 {
+ // Entire input is non string, comment, regexp tokens.
+ c.jsCtx = nextJSCtx(s, c.jsCtx)
+@@ -274,6 +275,8 @@ func tJS(c context, s []byte) (context, int) {
+ c.state, c.jsCtx = stateJSDqStr, jsCtxRegexp
+ case '\'':
+ c.state, c.jsCtx = stateJSSqStr, jsCtxRegexp
++ case '`':
++ c.state, c.jsCtx = stateJSBqStr, jsCtxRegexp
+ case '/':
+ switch {
+ case i+1 < len(s) && s[i+1] == '/':
+@@ -303,6 +306,8 @@ func tJSDelimited(c context, s []byte) (context, int) {
+ switch c.state {
+ case stateJSSqStr:
+ specials = `\'`
++ case stateJSBqStr:
++ specials = "`\\"
+ case stateJSRegexp:
+ specials = `\/[]`
+ }
+--
+2.7.4
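
A minimal sketch (not part of the patch) of the new failure mode: with this change applied, an action inside a JavaScript template literal makes escaping fail at execution time unless the old behaviour is re-enabled with GODEBUG=jstmpllitinterp=1. The template text and data are illustrative assumptions:

    package main

    import (
        "fmt"
        "html/template"
        "os"
    )

    func main() {
        t := template.Must(template.New("x").Parse(
            "<script>var tmpl = `hello {{.}}`;</script>"))
        if err := t.Execute(os.Stdout, "world"); err != nil {
            // Expected to resemble: "{{.}} appears in a JS template literal"
            fmt.Println("escape error:", err)
        }
    }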
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24539.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24539.patch
new file mode 100644
index 0000000000..281b6486a8
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24539.patch
@@ -0,0 +1,60 @@
+From 8673ca81e5340b87709db2d9749c92a3bf925df1 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 13 Apr 2023 15:40:44 -0700
+Subject: [PATCH] html/template: disallow angle brackets in CSS values
+
+Angle brackets should not appear in CSS contexts, as they may affect
+token boundaries (such as closing a <style> tag, resulting in
+injection). Instead emit filterFailsafe, matching the behavior for other
+dangerous characters.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #59720
+Fixes CVE-2023-24539
+
+Change-Id: Iccc659c9a18415992b0c05c178792228e3a7bae4
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1826636
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491615
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/8673ca81e5340b87709db2d9749c92a3bf925df1]
+CVE: CVE-2023-24539
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+---
+ src/html/template/css.go | 2 +-
+ src/html/template/css_test.go | 2 ++
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/html/template/css.go b/src/html/template/css.go
+index 890a0c6b227fe..f650d8b3e843a 100644
+--- a/src/html/template/css.go
++++ b/src/html/template/css.go
+@@ -238,7 +238,7 @@ func cssValueFilter(args ...any) string {
+ // inside a string that might embed JavaScript source.
+ for i, c := range b {
+ switch c {
+- case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}':
++ case 0, '"', '\'', '(', ')', '/', ';', '@', '[', '\\', ']', '`', '{', '}', '<', '>':
+ return filterFailsafe
+ case '-':
+ // Disallow <!-- or -->.
+diff --git a/src/html/template/css_test.go b/src/html/template/css_test.go
+index a735638b0314f..2b76256a766e9 100644
+--- a/src/html/template/css_test.go
++++ b/src/html/template/css_test.go
+@@ -231,6 +231,8 @@ func TestCSSValueFilter(t *testing.T) {
+ {`-exp\000052 ession(alert(1337))`, "ZgotmplZ"},
+ {`-expre\0000073sion`, "-expre\x073sion"},
+ {`@import url evil.css`, "ZgotmplZ"},
++ {"<", "ZgotmplZ"},
++ {">", "ZgotmplZ"},
+ }
+ for _, test := range tests {
+ got := cssValueFilter(test.css)
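
A minimal sketch (not part of the patch) of the effect on html/template users: a value containing '<' or '>' that previously passed cssValueFilter unchanged now collapses to the ZgotmplZ failsafe. The template and data are illustrative assumptions:

    package main

    import (
        "html/template"
        "os"
    )

    func main() {
        t := template.Must(template.New("x").Parse(
            "<style>p { color: {{.}} }</style>"))
        // "a<b" contains none of the previously filtered characters, so before
        // the fix it passed through unchanged; the expected output is now
        // <style>p { color: ZgotmplZ }</style>
        _ = t.Execute(os.Stdout, "a<b")
    }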
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-24540.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-24540.patch
new file mode 100644
index 0000000000..799a0dfcda
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-24540.patch
@@ -0,0 +1,90 @@
+From ce7bd33345416e6d8cac901792060591cafc2797 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Tue, 11 Apr 2023 16:27:43 +0100
+Subject: [PATCH] [release-branch.go1.19] html/template: handle all JS
+ whitespace characters
+
+Rather than just a small set. Character class as defined by \s [0].
+
+Thanks to Juho Nurminen of Mattermost for reporting this.
+
+For #59721
+Fixes #59813
+Fixes CVE-2023-24540
+
+[0] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions/Character_Classes
+
+Change-Id: I56d4fa1ef08125b417106ee7dbfb5b0923b901ba
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1821459
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1851497
+Run-TryBot: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491355
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+TryBot-Bypass: Carlos Amedee <carlos@golang.org>
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/ce7bd33345416e6d8cac901792060591cafc2797]
+CVE: CVE-2023-24540
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/html/template/js.go | 8 +++++++-
+ src/html/template/js_test.go | 11 +++++++----
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index fe7054efe5cd8..4e05c1455723f 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -13,6 +13,11 @@ import (
+ "unicode/utf8"
+ )
+
++// jsWhitespace contains all of the JS whitespace characters, as defined
++// by the \s character class.
++// See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_expressions/Character_classes.
++const jsWhitespace = "\f\n\r\t\v\u0020\u00a0\u1680\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000\ufeff"
++
+ // nextJSCtx returns the context that determines whether a slash after the
+ // given run of tokens starts a regular expression instead of a division
+ // operator: / or /=.
+@@ -26,7 +31,8 @@ import (
+ // JavaScript 2.0 lexical grammar and requires one token of lookbehind:
+ // https://www.mozilla.org/js/language/js20-2000-07/rationale/syntax.html
+ func nextJSCtx(s []byte, preceding jsCtx) jsCtx {
+- s = bytes.TrimRight(s, "\t\n\f\r \u2028\u2029")
++ // Trim all JS whitespace characters
++ s = bytes.TrimRight(s, jsWhitespace)
+ if len(s) == 0 {
+ return preceding
+ }
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index e07c695f7a77d..e52180cc113b5 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -81,14 +81,17 @@ func TestNextJsCtx(t *testing.T) {
+ {jsCtxDivOp, "0"},
+ // Dots that are part of a number are div preceders.
+ {jsCtxDivOp, "0."},
++ // Some JS interpreters treat NBSP as a normal space, so
++ // we must too in order to properly escape things.
++ {jsCtxRegexp, "=\u00A0"},
+ }
+
+ for _, test := range tests {
+- if nextJSCtx([]byte(test.s), jsCtxRegexp) != test.jsCtx {
+- t.Errorf("want %s got %q", test.jsCtx, test.s)
++ if ctx := nextJSCtx([]byte(test.s), jsCtxRegexp); ctx != test.jsCtx {
++ t.Errorf("%q: want %s got %s", test.s, test.jsCtx, ctx)
+ }
+- if nextJSCtx([]byte(test.s), jsCtxDivOp) != test.jsCtx {
+- t.Errorf("want %s got %q", test.jsCtx, test.s)
++ if ctx := nextJSCtx([]byte(test.s), jsCtxDivOp); ctx != test.jsCtx {
++ t.Errorf("%q: want %s got %s", test.s, test.jsCtx, ctx)
+ }
+ }
+
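
A minimal sketch (not part of the patch) of the whitespace handling it changes; the jsWhitespace constant below is assumed to mirror the one the patch adds:

    package main

    import (
        "fmt"
        "strings"
    )

    // Assumed copy of the patch's jsWhitespace constant: the JS \s class.
    const jsWhitespace = "\f\n\r\t\v\u0020\u00a0\u1680\u2000\u2001\u2002\u2003" +
        "\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000\ufeff"

    func main() {
        // Trimming the full class leaves "x =", so a following '/' is treated
        // as the start of a regexp literal; the old ASCII-only trim left the
        // non-breaking space in place and misclassified the '/' as division.
        fmt.Printf("%q\n", strings.TrimRight("x =\u00a0", jsWhitespace))
    }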
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29400.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29400.patch
new file mode 100644
index 0000000000..092c7aa0ff
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29400.patch
@@ -0,0 +1,94 @@
+From 0d347544cbca0f42b160424f6bc2458ebcc7b3fc Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 13 Apr 2023 14:01:50 -0700
+Subject: [PATCH] html/template: emit filterFailsafe for empty unquoted attr
+ value
+
+An unquoted action used as an attribute value can result in unsafe
+behavior if it is empty, as HTML normalization will result in unexpected
+attributes, and may allow attribute injection. If executing a template
+results in an empty unquoted attribute value, emit filterFailsafe
+instead.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes #59722
+Fixes CVE-2023-29400
+
+Change-Id: Ia38d1b536ae2b4af5323a6c6d861e3c057c2570a
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1826631
+Reviewed-by: Julie Qiu <julieqiu@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/491617
+Run-TryBot: Carlos Amedee <carlos@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/0d347544cbca0f42b160424f6bc2458ebcc7b3fc]
+CVE: CVE-2023-29400
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+---
+ src/html/template/escape.go | 5 ++---
+ src/html/template/escape_test.go | 15 +++++++++++++++
+ src/html/template/html.go | 3 +++
+ 3 files changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 4ba1d6b31897e..a62ef159f0dcd 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -382,9 +382,8 @@ func normalizeEscFn(e string) string {
+ // for all x.
+ var redundantFuncs = map[string]map[string]bool{
+ "_html_template_commentescaper": {
+- "_html_template_attrescaper": true,
+- "_html_template_nospaceescaper": true,
+- "_html_template_htmlescaper": true,
++ "_html_template_attrescaper": true,
++ "_html_template_htmlescaper": true,
+ },
+ "_html_template_cssescaper": {
+ "_html_template_attrescaper": true,
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index 3dd212bac9406..f8b2b448f2dfa 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -678,6 +678,21 @@ func TestEscape(t *testing.T) {
+ `<img srcset={{",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"}}>`,
+ `<img srcset=,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,>`,
+ },
++ {
++ "unquoted empty attribute value (plaintext)",
++ "<p name={{.U}}>",
++ "<p name=ZgotmplZ>",
++ },
++ {
++ "unquoted empty attribute value (url)",
++ "<p href={{.U}}>",
++ "<p href=ZgotmplZ>",
++ },
++ {
++ "quoted empty attribute value",
++ "<p name=\"{{.U}}\">",
++ "<p name=\"\">",
++ },
+ }
+
+ for _, test := range tests {
+diff --git a/src/html/template/html.go b/src/html/template/html.go
+index bcca0b51a0ef9..a181699a5bda8 100644
+--- a/src/html/template/html.go
++++ b/src/html/template/html.go
+@@ -14,6 +14,9 @@ import (
+ // htmlNospaceEscaper escapes for inclusion in unquoted attribute values.
+ func htmlNospaceEscaper(args ...interface{}) string {
+ s, t := stringify(args...)
++ if s == "" {
++ return filterFailsafe
++ }
+ if t == contentTypeHTML {
+ return htmlReplacer(stripTags(s), htmlNospaceNormReplacementTable, false)
+ }
+
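
A minimal sketch (not part of the patch) of the behaviour change for unquoted attribute values; the template and data are illustrative assumptions:

    package main

    import (
        "html/template"
        "os"
    )

    func main() {
        t := template.Must(template.New("x").Parse(`<p name={{.}} class=safe>`))
        // With an empty value the expected output is now
        // <p name=ZgotmplZ class=safe>, so HTML normalization cannot fold the
        // following attribute into the (otherwise empty) name value.
        _ = t.Execute(os.Stdout, "")
    }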
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29402.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29402.patch
new file mode 100644
index 0000000000..01eed9fe1b
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29402.patch
@@ -0,0 +1,201 @@
+From c160b49b6d328c86bd76ca2fff9009a71347333f Mon Sep 17 00:00:00 2001
+From: "Bryan C. Mills" <bcmills@google.com>
+Date: Fri, 12 May 2023 14:15:16 -0400
+Subject: [PATCH] [release-branch.go1.19] cmd/go: disallow package directories
+ containing newlines
+
+Directory or file paths containing newlines may cause tools (such as
+cmd/cgo) that emit "//line" or "#line" -directives to write part of
+the path into non-comment lines in generated source code. If those
+lines contain valid Go code, it may be injected into the resulting
+binary.
+
+(Note that Go import paths and file paths within module zip files
+already could not contain newlines.)
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Updates #60167.
+Fixes #60515.
+Fixes CVE-2023-29402.
+
+Change-Id: If55d0400c02beb7a5da5eceac60f1abeac99f064
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1882606
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Russ Cox <rsc@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 41f9046495564fc728d6f98384ab7276450ac7e2)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1902229
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1904343
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+Reviewed-by: Bryan Mills <bcmills@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501218
+Run-TryBot: David Chase <drchase@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c160b49b6d328c86bd76ca2fff9009a71347333f]
+CVE: CVE-2023-29402
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/cmd/go/internal/load/pkg.go | 4 +
+ src/cmd/go/internal/work/exec.go | 6 ++
+ src/cmd/go/script_test.go | 1 +
+ .../go/testdata/script/build_cwd_newline.txt | 100 ++++++++++++++++++
+ 4 files changed, 111 insertions(+)
+ create mode 100644 src/cmd/go/testdata/script/build_cwd_newline.txt
+
+diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
+index 369a79b..d2b63b0 100644
+--- a/src/cmd/go/internal/load/pkg.go
++++ b/src/cmd/go/internal/load/pkg.go
+@@ -1697,6 +1697,10 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) {
+ setError(ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath))
+ return
+ }
++ if strings.ContainsAny(p.Dir, "\r\n") {
++ setError(fmt.Errorf("invalid package directory %q", p.Dir))
++ return
++ }
+
+ // Build list of imported packages and full dependency list.
+ imports := make([]*Package, 0, len(p.Imports))
+diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
+index 9a9650b..050b785 100644
+--- a/src/cmd/go/internal/work/exec.go
++++ b/src/cmd/go/internal/work/exec.go
+@@ -458,6 +458,12 @@ func (b *Builder) build(a *Action) (err error) {
+ b.Print(a.Package.ImportPath + "\n")
+ }
+
++ if p.Error != nil {
++ // Don't try to build anything for packages with errors. There may be a
++ // problem with the inputs that makes the package unsafe to build.
++ return p.Error
++ }
++
+ if a.Package.BinaryOnly {
+ p.Stale = true
+ p.StaleReason = "binary-only packages are no longer supported"
+diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
+index ec498bb..a1398ad 100644
+--- a/src/cmd/go/script_test.go
++++ b/src/cmd/go/script_test.go
+@@ -123,6 +123,7 @@ func (ts *testScript) setup() {
+ "devnull=" + os.DevNull,
+ "goversion=" + goVersion(ts),
+ ":=" + string(os.PathListSeparator),
++ "newline=\n",
+ }
+
+ if runtime.GOOS == "plan9" {
+diff --git a/src/cmd/go/testdata/script/build_cwd_newline.txt b/src/cmd/go/testdata/script/build_cwd_newline.txt
+new file mode 100644
+index 0000000..61c6966
+--- /dev/null
++++ b/src/cmd/go/testdata/script/build_cwd_newline.txt
+@@ -0,0 +1,100 @@
++[windows] skip 'filesystem normalizes / to \'
++[plan9] skip 'filesystem disallows \n in paths'
++
++# If the directory path containing a package to be built includes a newline,
++# the go command should refuse to even try to build the package.
++
++env DIR=$WORK${/}${newline}'package main'${newline}'func main() { panic("uh-oh")'${newline}'/*'
++
++mkdir $DIR
++cd $DIR
++exec pwd
++cp $WORK/go.mod ./go.mod
++cp $WORK/main.go ./main.go
++cp $WORK/main_test.go ./main_test.go
++
++! go build -o $devnull .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go build -o $devnull main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++! go run .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go run main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++! go test .
++stderr 'package example: invalid package directory .*uh-oh'
++
++! go test -v main.go main_test.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++
++# Since we do preserve $PWD (or set it appropriately) for commands, and we do
++# not resolve symlinks unnecessarily, referring to the contents of the unsafe
++# directory via a safe symlink should be ok, and should not inject the data from
++# the symlink target path.
++
++[!symlink] stop 'remainder of test checks symlink behavior'
++[short] stop 'links and runs binaries'
++
++symlink $WORK${/}link -> $DIR
++
++go run $WORK${/}link${/}main.go
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go test -v $WORK${/}link${/}main.go $WORK${/}link${/}main_test.go
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++cd $WORK/link
++
++! go run $DIR${/}main.go
++stderr 'package command-line-arguments: invalid package directory .*uh-oh'
++
++go run .
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go run main.go
++! stdout panic
++! stderr panic
++stderr '^ok$'
++
++go test -v
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++go test -v .
++! stdout panic
++! stderr panic
++stdout '^ok$' # 'go test' combines the test's stdout into stderr
++
++
++-- $WORK/go.mod --
++module example
++go 1.19
++-- $WORK/main.go --
++package main
++
++import "C"
++
++func main() {
++ /* nothing here */
++ println("ok")
++}
++-- $WORK/main_test.go --
++package main
++
++import "testing"
++
++func TestMain(*testing.M) {
++ main()
++}
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29404.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29404.patch
new file mode 100644
index 0000000000..61336ee9ee
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29404.patch
@@ -0,0 +1,84 @@
+From bf3c8ce03e175e870763901a3850bca01381a828 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Fri, 5 May 2023 13:10:34 -0700
+Subject: [PATCH] [release-branch.go1.19] cmd/go: enforce flags with
+ non-optional arguments
+
+Enforce that linker flags which expect arguments get them, otherwise it
+may be possible to smuggle unexpected flags through as the linker can
+consume what looks like a flag as an argument to a preceding flag (i.e.
+"-Wl,-O -Wl,-R,-bad-flag" is interpreted as "-O=-R -bad-flag"). Also be
+somewhat more restrictive in the general format of some flags.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Updates #60305
+Fixes #60511
+Fixes CVE-2023-29404
+
+Change-Id: Icdffef2c0f644da50261cace6f43742783931cff
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1876275
+Reviewed-by: Ian Lance Taylor <iant@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit 896779503cf754cbdac24b61d4cc953b50fe2dde)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1902225
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1904342
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501217
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+Run-TryBot: David Chase <drchase@google.com>
+TryBot-Bypass: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/bf3c8ce03e175e870763901a3850bca01381a828]
+CVE: CVE-2023-29404
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/cmd/go/internal/work/security.go | 6 +++---
+ src/cmd/go/internal/work/security_test.go | 5 +++++
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go
+index a823b20..8acb6dc 100644
+--- a/src/cmd/go/internal/work/security.go
++++ b/src/cmd/go/internal/work/security.go
+@@ -177,17 +177,17 @@ var validLinkerFlags = []*lazyregexp.Regexp{
+ re(`-Wl,-Bdynamic`),
+ re(`-Wl,-berok`),
+ re(`-Wl,-Bstatic`),
+- re(`-WL,-O([^@,\-][^,]*)?`),
++ re(`-Wl,-O[0-9]+`),
+ re(`-Wl,-d[ny]`),
+ re(`-Wl,--disable-new-dtags`),
+- re(`-Wl,-e[=,][a-zA-Z0-9]*`),
++ re(`-Wl,-e[=,][a-zA-Z0-9]+`),
+ re(`-Wl,--enable-new-dtags`),
+ re(`-Wl,--end-group`),
+ re(`-Wl,--(no-)?export-dynamic`),
+ re(`-Wl,-framework,[^,@\-][^,]+`),
+ re(`-Wl,-headerpad_max_install_names`),
+ re(`-Wl,--no-undefined`),
+- re(`-Wl,-R([^@\-][^,@]*$)`),
++ re(`-Wl,-R,?([^@\-,][^,@]*$)`),
+ re(`-Wl,--just-symbols[=,]([^,@\-][^,@]+)`),
+ re(`-Wl,-rpath(-link)?[=,]([^,@\-][^,]+)`),
+ re(`-Wl,-s`),
+diff --git a/src/cmd/go/internal/work/security_test.go b/src/cmd/go/internal/work/security_test.go
+index bd707ff..7b0b7d3 100644
+--- a/src/cmd/go/internal/work/security_test.go
++++ b/src/cmd/go/internal/work/security_test.go
+@@ -220,6 +220,11 @@ var badLinkerFlags = [][]string{
+ {"-Wl,-R,@foo"},
+ {"-Wl,--just-symbols,@foo"},
+ {"../x.o"},
++ {"-Wl,-R,"},
++ {"-Wl,-O"},
++ {"-Wl,-e="},
++ {"-Wl,-e,"},
++ {"-Wl,-R,-flag"},
+ }
+
+ func TestCheckLinkerFlags(t *testing.T) {
+--
+2.25.1
+
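
A minimal sketch (not part of the patch) of the kind of cgo directive the tightened patterns reject; the flags are taken from the commit message and the new badLinkerFlags test cases:

    package main

    // With the fix, `go build` refuses this file: -Wl,-O and -Wl,-R now
    // require a well-formed argument, so the second flag can no longer be
    // smuggled in as the first one's argument ("-O=-R -bad-flag").
    // #cgo LDFLAGS: -Wl,-O -Wl,-R,-bad-flag
    import "C"

    func main() {}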
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-1.patch
new file mode 100644
index 0000000000..70d50cc08a
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-1.patch
@@ -0,0 +1,112 @@
+From fa60c381ed06c12f9c27a7b50ca44c5f84f7f0f4 Mon Sep 17 00:00:00 2001
+From: Ian Lance Taylor <iant@golang.org>
+Date: Thu, 4 May 2023 14:06:39 -0700
+Subject: [PATCH] [release-branch.go1.20] cmd/go,cmd/cgo: in _cgo_flags use one
+ line per flag
+
+The flags that we recorded in _cgo_flags did not use any quoting,
+so a flag containing embedded spaces was mishandled.
+Change the _cgo_flags format to put each flag on a separate line.
+That is a simple format that does not require any quoting.
+
+As far as I can tell only cmd/go uses _cgo_flags, and it is only
+used for gccgo. If this patch doesn't cause any trouble, then
+in the next release we can change to only using _cgo_flags for gccgo.
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Updates #60306
+Fixes #60514
+Fixes CVE-2023-29405
+
+Change-Id: I36b6e188a44c80d7b9573efa577c386770bd2ba3
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1875094
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit bcdfcadd5612212089d958bc352a6f6c90742dcc)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1902228
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1904345
+Reviewed-by: Michael Knyszek <mknyszek@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501220
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: David Chase <drchase@google.com>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+---
+Upstream-Status: Backport [https://github.com/golang/go/commit/fa60c381ed06c12f9c27a7b50ca44c5f84f7f0f4]
+CVE: CVE-2023-29405
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ src/cmd/cgo/out.go | 4 +++-
+ src/cmd/go/internal/work/gccgo.go | 14 ++++++-------
+ .../go/testdata/script/gccgo_link_ldflags.txt | 20 +++++++++++++++++++
+ 3 files changed, 29 insertions(+), 9 deletions(-)
+ create mode 100644 src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+
+diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
+index d26f9e76a374a..d0c6fe3d4c2c2 100644
+--- a/src/cmd/cgo/out.go
++++ b/src/cmd/cgo/out.go
+@@ -47,7 +47,9 @@ func (p *Package) writeDefs() {
+
+ fflg := creat(*objDir + "_cgo_flags")
+ for k, v := range p.CgoFlags {
+- fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, strings.Join(v, " "))
++ for _, arg := range v {
++ fmt.Fprintf(fflg, "_CGO_%s=%s\n", arg)
++ }
+ if k == "LDFLAGS" && !*gccgo {
+ for _, arg := range v {
+ fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg)
+diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go
+index 08a4c2d8166c7..a048b7f4eecef 100644
+--- a/src/cmd/go/internal/work/gccgo.go
++++ b/src/cmd/go/internal/work/gccgo.go
+@@ -280,14 +280,12 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string
+ const ldflagsPrefix = "_CGO_LDFLAGS="
+ for _, line := range strings.Split(string(flags), "\n") {
+ if strings.HasPrefix(line, ldflagsPrefix) {
+- newFlags := strings.Fields(line[len(ldflagsPrefix):])
+- for _, flag := range newFlags {
+- // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS
+- // but they don't mean anything to the linker so filter
+- // them out.
+- if flag != "-g" && !strings.HasPrefix(flag, "-O") {
+- cgoldflags = append(cgoldflags, flag)
+- }
++ flag := line[len(ldflagsPrefix):]
++ // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS
++ // but they don't mean anything to the linker so filter
++ // them out.
++ if flag != "-g" && !strings.HasPrefix(flag, "-O") {
++ cgoldflags = append(cgoldflags, flag)
+ }
+ }
+ }
+diff --git a/src/cmd/go/testdata/script/gccgo_link_ldflags.txt b/src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+new file mode 100644
+index 0000000000000..4e91ae56505b6
+--- /dev/null
++++ b/src/cmd/go/testdata/script/gccgo_link_ldflags.txt
+@@ -0,0 +1,20 @@
++# Test that #cgo LDFLAGS are properly quoted.
++# The #cgo LDFLAGS below should pass a string with spaces to -L,
++# as though searching a directory with a space in its name.
++# It should not pass --nosuchoption to the external linker.
++
++[!cgo] skip
++
++go build
++
++[!exec:gccgo] skip
++
++go build -compiler gccgo
++
++-- go.mod --
++module m
++-- cgo.go --
++package main
++// #cgo LDFLAGS: -L "./ -Wl,--nosuchoption"
++import "C"
++func main() {}
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-2.patch
new file mode 100644
index 0000000000..369eca581e
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29405-2.patch
@@ -0,0 +1,38 @@
+From 1008486a9ff979dbd21c7466eeb6abf378f9c637 Mon Sep 17 00:00:00 2001
+From: Ian Lance Taylor <iant@golang.org>
+Date: Tue, 6 Jun 2023 12:51:17 -0700
+Subject: [PATCH] [release-branch.go1.20] cmd/cgo: correct _cgo_flags output
+
+For #60306
+For #60514
+
+Change-Id: I3f5d14aee7d7195030e8872e42b1d97aa11d3582
+Reviewed-on: https://go-review.googlesource.com/c/go/+/501298
+Run-TryBot: Ian Lance Taylor <iant@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: David Chase <drchase@google.com>
+Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
+---
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/1008486a9ff979dbd21c7466eeb6abf378f9c637]
+CVE: CVE-2023-29405
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+
+ src/cmd/cgo/out.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
+index d0c6fe3d4c2c2..a48f52105628a 100644
+--- a/src/cmd/cgo/out.go
++++ b/src/cmd/cgo/out.go
+@@ -48,7 +48,7 @@ func (p *Package) writeDefs() {
+ fflg := creat(*objDir + "_cgo_flags")
+ for k, v := range p.CgoFlags {
+ for _, arg := range v {
+- fmt.Fprintf(fflg, "_CGO_%s=%s\n", arg)
++ fmt.Fprintf(fflg, "_CGO_%s=%s\n", k, arg)
+ }
+ if k == "LDFLAGS" && !*gccgo {
+ for _, arg := range v {
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-1.patch
new file mode 100644
index 0000000000..080def4682
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-1.patch
@@ -0,0 +1,212 @@
+From 5fa6923b1ea891400153d04ddf1545e23b40041b Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 28 Jun 2023 13:20:08 -0700
+Subject: [PATCH] [release-branch.go1.19] net/http: validate Host header before
+ sending
+
+Verify that the Host header we send is valid.
+Avoids surprising behavior such as a Host of "go.dev\r\nX-Evil:oops"
+adding an X-Evil header to HTTP/1 requests.
+
+Add a test, skip the test for HTTP/2. HTTP/2 is not vulnerable to
+header injection in the way HTTP/1 is, but x/net/http2 doesn't validate
+the header and will go into a retry loop when the server rejects it.
+CL 506995 adds the necessary validation to x/net/http2.
+
+Updates #60374
+Fixes #61075
+For CVE-2023-29406
+
+Change-Id: I05cb6866a9bead043101954dfded199258c6dd04
+Reviewed-on: https://go-review.googlesource.com/c/go/+/506996
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit 499458f7ca04087958987a33c2703c3ef03e27e2)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/507358
+Run-TryBot: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5fa6923b1ea891400153d04ddf1545e23b40041b]
+CVE: CVE-2023-29406
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/net/http/http_test.go | 29 ---------------------
+ src/net/http/request.go | 47 ++++++++--------------------------
+ src/net/http/request_test.go | 11 ++------
+ src/net/http/transport_test.go | 18 +++++++++++++
+ 4 files changed, 31 insertions(+), 74 deletions(-)
+
+diff --git a/src/net/http/http_test.go b/src/net/http/http_test.go
+index f4ea52d..ea38cb4 100644
+--- a/src/net/http/http_test.go
++++ b/src/net/http/http_test.go
+@@ -49,35 +49,6 @@ func TestForeachHeaderElement(t *testing.T) {
+ }
+ }
+
+-func TestCleanHost(t *testing.T) {
+- tests := []struct {
+- in, want string
+- }{
+- {"www.google.com", "www.google.com"},
+- {"www.google.com foo", "www.google.com"},
+- {"www.google.com/foo", "www.google.com"},
+- {" first character is a space", ""},
+- {"[1::6]:8080", "[1::6]:8080"},
+-
+- // Punycode:
+- {"гофер.рф/foo", "xn--c1ae0ajs.xn--p1ai"},
+- {"bücher.de", "xn--bcher-kva.de"},
+- {"bücher.de:8080", "xn--bcher-kva.de:8080"},
+- // Verify we convert to lowercase before punycode:
+- {"BÜCHER.de", "xn--bcher-kva.de"},
+- {"BÜCHER.de:8080", "xn--bcher-kva.de:8080"},
+- // Verify we normalize to NFC before punycode:
+- {"gophér.nfc", "xn--gophr-esa.nfc"}, // NFC input; no work needed
+- {"goph\u0065\u0301r.nfd", "xn--gophr-esa.nfd"}, // NFD input
+- }
+- for _, tt := range tests {
+- got := cleanHost(tt.in)
+- if tt.want != got {
+- t.Errorf("cleanHost(%q) = %q, want %q", tt.in, got, tt.want)
+- }
+- }
+-}
+-
+ // Test that cmd/go doesn't link in the HTTP server.
+ //
+ // This catches accidental dependencies between the HTTP transport and
+diff --git a/src/net/http/request.go b/src/net/http/request.go
+index cb2edd2..2706300 100644
+--- a/src/net/http/request.go
++++ b/src/net/http/request.go
+@@ -18,7 +18,6 @@ import (
+ "io/ioutil"
+ "mime"
+ "mime/multipart"
+- "net"
+ "net/http/httptrace"
+ "net/textproto"
+ "net/url"
+@@ -26,7 +25,8 @@ import (
+ "strconv"
+ "strings"
+ "sync"
+-
++
++ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/idna"
+ )
+
+@@ -557,12 +557,19 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF
+ // is not given, use the host from the request URL.
+ //
+ // Clean the host, in case it arrives with unexpected stuff in it.
+- host := cleanHost(r.Host)
++ host := r.Host
+ if host == "" {
+ if r.URL == nil {
+ return errMissingHost
+ }
+- host = cleanHost(r.URL.Host)
++ host = r.URL.Host
++ }
++ host, err = httpguts.PunycodeHostPort(host)
++ if err != nil {
++ return err
++ }
++ if !httpguts.ValidHostHeader(host) {
++ return errors.New("http: invalid Host header")
+ }
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+@@ -717,38 +724,6 @@ func idnaASCII(v string) (string, error) {
+ return idna.Lookup.ToASCII(v)
+ }
+
+-// cleanHost cleans up the host sent in request's Host header.
+-//
+-// It both strips anything after '/' or ' ', and puts the value
+-// into Punycode form, if necessary.
+-//
+-// Ideally we'd clean the Host header according to the spec:
+-// https://tools.ietf.org/html/rfc7230#section-5.4 (Host = uri-host [ ":" port ]")
+-// https://tools.ietf.org/html/rfc7230#section-2.7 (uri-host -> rfc3986's host)
+-// https://tools.ietf.org/html/rfc3986#section-3.2.2 (definition of host)
+-// But practically, what we are trying to avoid is the situation in
+-// issue 11206, where a malformed Host header used in the proxy context
+-// would create a bad request. So it is enough to just truncate at the
+-// first offending character.
+-func cleanHost(in string) string {
+- if i := strings.IndexAny(in, " /"); i != -1 {
+- in = in[:i]
+- }
+- host, port, err := net.SplitHostPort(in)
+- if err != nil { // input was just a host
+- a, err := idnaASCII(in)
+- if err != nil {
+- return in // garbage in, garbage out
+- }
+- return a
+- }
+- a, err := idnaASCII(host)
+- if err != nil {
+- return in // garbage in, garbage out
+- }
+- return net.JoinHostPort(a, port)
+-}
+-
+ // removeZone removes IPv6 zone identifier from host.
+ // E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+ func removeZone(host string) string {
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index 461d66e..0d417ff 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -676,15 +676,8 @@ func TestRequestBadHost(t *testing.T) {
+ }
+ req.Host = "foo.com with spaces"
+ req.URL.Host = "foo.com with spaces"
+- req.Write(logWrites{t, &got})
+- want := []string{
+- "GET /after HTTP/1.1\r\n",
+- "Host: foo.com\r\n",
+- "User-Agent: " + DefaultUserAgent + "\r\n",
+- "\r\n",
+- }
+- if !reflect.DeepEqual(got, want) {
+- t.Errorf("Writes = %q\n Want = %q", got, want)
++ if err := req.Write(logWrites{t, &got}); err == nil {
++ t.Errorf("Writing request with invalid Host: succeded, want error")
+ }
+ }
+
+diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go
+index fa0c370..0afb6b9 100644
+--- a/src/net/http/transport_test.go
++++ b/src/net/http/transport_test.go
+@@ -6249,3 +6249,21 @@ func TestIssue32441(t *testing.T) {
+ t.Error(err)
+ }
+ }
++
++func TestRequestSanitization(t *testing.T) {
++ setParallel(t)
++ defer afterTest(t)
++
++ ts := newClientServerTest(t, h1Mode, HandlerFunc(func(rw ResponseWriter, req *Request) {
++ if h, ok := req.Header["X-Evil"]; ok {
++ t.Errorf("request has X-Evil header: %q", h)
++ }
++ })).ts
++ defer ts.Close()
++ req, _ := NewRequest("GET", ts.URL, nil)
++ req.Host = "go.dev\r\nX-Evil:evil"
++ resp, _ := ts.Client().Do(req)
++ if resp != nil {
++ resp.Body.Close()
++ }
++}
+--
+2.25.1
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-2.patch
new file mode 100644
index 0000000000..637f46a537
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29406-2.patch
@@ -0,0 +1,114 @@
+From c08a5fa413a34111c9a37fd9e545de27ab0978b1 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Wed, 19 Jul 2023 10:30:46 -0700
+Subject: [PATCH] [release-branch.go1.19] net/http: permit requests with
+ invalid Host headers
+
+Historically, the Transport has silently truncated invalid
+Host headers at the first '/' or ' ' character. CL 506996 changed
+this behavior to reject invalid Host headers entirely.
+Unfortunately, Docker appears to rely on the previous behavior.
+
+When sending a HTTP/1 request with an invalid Host, send an empty
+Host header. This is safer than truncation: If you care about the
+Host, then you should get the one you set; if you don't care,
+then an empty Host should be fine.
+
+Continue to fully validate Host headers sent to a proxy,
+since proxies generally can't productively forward requests
+without a Host.
+
+For #60374
+Fixes #61431
+Fixes #61825
+
+Change-Id: If170c7dd860aa20eb58fe32990fc93af832742b6
+Reviewed-on: https://go-review.googlesource.com/c/go/+/511155
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+Run-TryBot: Damien Neil <dneil@google.com>
+(cherry picked from commit b9153f6ef338baee5fe02a867c8fbc83a8b29dd1)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/518855
+Auto-Submit: Dmitri Shuralyov <dmitshur@google.com>
+Run-TryBot: Roland Shoemaker <roland@golang.org>
+Reviewed-by: Russ Cox <rsc@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c08a5fa413a34111c9a37fd9e545de27ab0978b1]
+CVE: CVE-2023-29406
+Signed-off-by: Ming Liu <liu.ming50@gmail.com>
+---
+ src/net/http/request.go | 23 ++++++++++++++++++++++-
+ src/net/http/request_test.go | 17 ++++++++++++-----
+ 2 files changed, 34 insertions(+), 6 deletions(-)
+
+diff --git a/src/net/http/request.go b/src/net/http/request.go
+index 3100037386..91cb8a66b9 100644
+--- a/src/net/http/request.go
++++ b/src/net/http/request.go
+@@ -582,8 +582,29 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF
+ if err != nil {
+ return err
+ }
++ // Validate that the Host header is a valid header in general,
++ // but don't validate the host itself. This is sufficient to avoid
++ // header or request smuggling via the Host field.
++ // The server can (and will, if it's a net/http server) reject
++ // the request if it doesn't consider the host valid.
+ if !httpguts.ValidHostHeader(host) {
+- return errors.New("http: invalid Host header")
++ // Historically, we would truncate the Host header after '/' or ' '.
++ // Some users have relied on this truncation to convert a network
++ // address such as Unix domain socket path into a valid, ignored
++ // Host header (see https://go.dev/issue/61431).
++ //
++ // We don't preserve the truncation, because sending an altered
++ // header field opens a smuggling vector. Instead, zero out the
++ // Host header entirely if it isn't valid. (An empty Host is valid;
++ // see RFC 9112 Section 3.2.)
++ //
++ // Return an error if we're sending to a proxy, since the proxy
++ // probably can't do anything useful with an empty Host header.
++ if !usingProxy {
++ host = ""
++ } else {
++ return errors.New("http: invalid Host header")
++ }
+ }
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go
+index fddc85d6a9..dd1e2dc2a1 100644
+--- a/src/net/http/request_test.go
++++ b/src/net/http/request_test.go
+@@ -770,16 +770,23 @@ func TestRequestWriteBufferedWriter(t *testing.T) {
+ }
+ }
+
+-func TestRequestBadHost(t *testing.T) {
++func TestRequestBadHostHeader(t *testing.T) {
+ got := []string{}
+ req, err := NewRequest("GET", "http://foo/after", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+- req.Host = "foo.com with spaces"
+- req.URL.Host = "foo.com with spaces"
+- if err := req.Write(logWrites{t, &got}); err == nil {
+- t.Errorf("Writing request with invalid Host: succeded, want error")
++ req.Host = "foo.com\nnewline"
++ req.URL.Host = "foo.com\nnewline"
++ req.Write(logWrites{t, &got})
++ want := []string{
++ "GET /after HTTP/1.1\r\n",
++ "Host: \r\n",
++ "User-Agent: " + DefaultUserAgent + "\r\n",
++ "\r\n",
++ }
++ if !reflect.DeepEqual(got, want) {
++ t.Errorf("Writes = %q\n Want = %q", got, want)
+ }
+ }
+
+--
+2.34.1
+
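
A minimal sketch (not part of the patches) of the client-side behaviour once both changes are applied: a Host value containing header-breaking characters is sent as an empty Host header (and is an outright error only when the request goes through a proxy), so the injected line never reaches the server. The server, URL, and header names below are illustrative assumptions:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // With the fix, the smuggled "X-Evil" header never arrives.
            fmt.Println("X-Evil header present:", r.Header.Get("X-Evil") != "")
        }))
        defer srv.Close()

        req, _ := http.NewRequest("GET", srv.URL, nil)
        req.Host = "go.dev\r\nX-Evil: oops"
        if resp, err := srv.Client().Do(req); err == nil {
            resp.Body.Close()
        }
    }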
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-29409.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-29409.patch
new file mode 100644
index 0000000000..00685cc180
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-29409.patch
@@ -0,0 +1,175 @@
+From 2300f7ef07718f6be4d8aa8486c7de99836e233f Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Wed, 7 Jun 2023 15:27:13 -0700
+Subject: [PATCH] [release-branch.go1.19] crypto/tls: restrict RSA keys in
+ certificates to <= 8192 bits
+
+Extremely large RSA keys in certificate chains can cause a client/server
+to expend significant CPU time verifying signatures. Limit this by
+restricting the size of RSA keys transmitted during handshakes to <=
+8192 bits.
+
+Based on a survey of publicly trusted RSA keys, there are currently only
+three certificates in circulation with keys larger than this, and all
+three appear to be test certificates that are not actively deployed. It
+is possible there are larger keys in use in private PKIs, but we target
+the web PKI, so causing breakage here in the interests of increasing the
+default safety of users of crypto/tls seems reasonable.
+
+Thanks to Mateusz Poliwczak for reporting this issue.
+
+Updates #61460
+Fixes #61579
+Fixes CVE-2023-29409
+
+Change-Id: Ie35038515a649199a36a12fc2c5df3af855dca6c
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1912161
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit d865c715d92887361e4bd5596e19e513f27781b7)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1965487
+Reviewed-on: https://go-review.googlesource.com/c/go/+/514915
+Run-TryBot: David Chase <drchase@google.com>
+Reviewed-by: Matthew Dempsky <mdempsky@google.com>
+TryBot-Bypass: David Chase <drchase@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/2300f7ef07718f6be4d8aa8486c7de99836e233f]
+CVE: CVE-2023-29409
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/tls/handshake_client.go | 8 +++
+ src/crypto/tls/handshake_client_test.go | 78 +++++++++++++++++++++++++
+ src/crypto/tls/handshake_server.go | 4 ++
+ 3 files changed, 90 insertions(+)
+
+diff --git a/src/crypto/tls/handshake_client.go b/src/crypto/tls/handshake_client.go
+index 4fb528c..ba33ea1 100644
+--- a/src/crypto/tls/handshake_client.go
++++ b/src/crypto/tls/handshake_client.go
+@@ -788,6 +788,10 @@ func (hs *clientHandshakeState) sendFinished(out []byte) error {
+ return nil
+ }
+
++// maxRSAKeySize is the maximum RSA key size in bits that we are willing
++// to verify the signatures of during a TLS handshake.
++const maxRSAKeySize = 8192
++
+ // verifyServerCertificate parses and verifies the provided chain, setting
+ // c.verifiedChains and c.peerCertificates or sending the appropriate alert.
+ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
+@@ -798,6 +802,10 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to parse certificate from server: " + err.Error())
+ }
++ if cert.PublicKeyAlgorithm == x509.RSA && cert.PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
++ c.sendAlert(alertBadCertificate)
++ return fmt.Errorf("tls: server sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
++ }
+ certs[i] = cert
+ }
+
+diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go
+index 6bd3c37..8d20b2b 100644
+--- a/src/crypto/tls/handshake_client_test.go
++++ b/src/crypto/tls/handshake_client_test.go
+@@ -1984,3 +1984,81 @@ func TestCloseClientConnectionOnIdleServer(t *testing.T) {
+ t.Errorf("Error expected, but no error returned")
+ }
+ }
++
++// discardConn wraps a net.Conn but discards all writes, but reports that they happened.
++type discardConn struct {
++ net.Conn
++}
++
++func (dc *discardConn) Write(data []byte) (int, error) {
++ return len(data), nil
++}
++
++// largeRSAKeyCertPEM contains a 8193 bit RSA key
++const largeRSAKeyCertPEM = `-----BEGIN CERTIFICATE-----
++MIIInjCCBIWgAwIBAgIBAjANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDEwd0ZXN0
++aW5nMB4XDTIzMDYwNzIxMjMzNloXDTIzMDYwNzIzMjMzNlowEjEQMA4GA1UEAxMH
++dGVzdGluZzCCBCIwDQYJKoZIhvcNAQEBBQADggQPADCCBAoCggQBAWdHsf6Rh2Ca
++n2SQwn4t4OQrOjbLLdGE1pM6TBKKrHUFy62uEL8atNjlcfXIsa4aEu3xNGiqxqur
++ZectlkZbm0FkaaQ1Wr9oikDY3KfjuaXdPdO/XC/h8AKNxlDOylyXwUSK/CuYb+1j
++gy8yF5QFvVfwW/xwTlHmhUeSkVSQPosfQ6yXNNsmMzkd+ZPWLrfq4R+wiNtwYGu0
++WSBcI/M9o8/vrNLnIppoiBJJ13j9CR1ToEAzOFh9wwRWLY10oZhoh1ONN1KQURx4
++qedzvvP2DSjZbUccdvl2rBGvZpzfOiFdm1FCnxB0c72Cqx+GTHXBFf8bsa7KHky9
++sNO1GUanbq17WoDNgwbY6H51bfShqv0CErxatwWox3we4EcAmFHPVTCYL1oWVMGo
++a3Eth91NZj+b/nGhF9lhHKGzXSv9brmLLkfvM1jA6XhNhA7BQ5Vz67lj2j3XfXdh
++t/BU5pBXbL4Ut4mIhT1YnKXAjX2/LF5RHQTE8Vwkx5JAEKZyUEGOReD/B+7GOrLp
++HduMT9vZAc5aR2k9I8qq1zBAzsL69lyQNAPaDYd1BIAjUety9gAYaSQffCgAgpRO
++Gt+DYvxS+7AT/yEd5h74MU2AH7KrAkbXOtlwupiGwhMVTstncDJWXMJqbBhyHPF8
++3UmZH0hbL4PYmzSj9LDWQQXI2tv6vrCpfts3Cqhqxz9vRpgY7t1Wu6l/r+KxYYz3
++1pcGpPvRmPh0DJm7cPTiXqPnZcPt+ulSaSdlxmd19OnvG5awp0fXhxryZVwuiT8G
++VDkhyARrxYrdjlINsZJZbQjO0t8ketXAELJOnbFXXzeCOosyOHkLwsqOO96AVJA8
++45ZVL5m95ClGy0RSrjVIkXsxTAMVG6SPAqKwk6vmTdRGuSPS4rhgckPVDHmccmuq
++dfnT2YkX+wB2/M3oCgU+s30fAHGkbGZ0pCdNbFYFZLiH0iiMbTDl/0L/z7IdK0nH
++GLHVE7apPraKC6xl6rPWsD2iSfrmtIPQa0+rqbIVvKP5JdfJ8J4alI+OxFw/znQe
++V0/Rez0j22Fe119LZFFSXhRv+ZSvcq20xDwh00mzcumPWpYuCVPozA18yIhC9tNn
++ALHndz0tDseIdy9vC71jQWy9iwri3ueN0DekMMF8JGzI1Z6BAFzgyAx3DkHtwHg7
++B7qD0jPG5hJ5+yt323fYgJsuEAYoZ8/jzZ01pkX8bt+UsVN0DGnSGsI2ktnIIk3J
++l+8krjmUy6EaW79nITwoOqaeHOIp8m3UkjEcoKOYrzHRKqRy+A09rY+m/cAQaafW
++4xp0Zv7qZPLwnu0jsqB4jD8Ll9yPB02ndsoV6U5PeHzTkVhPml19jKUAwFfs7TJg
++kXy+/xFhYVUCAwEAATANBgkqhkiG9w0BAQsFAAOCBAIAAQnZY77pMNeypfpba2WK
++aDasT7dk2JqP0eukJCVPTN24Zca+xJNPdzuBATm/8SdZK9lddIbjSnWRsKvTnO2r
++/rYdlPf3jM5uuJtb8+Uwwe1s+gszelGS9G/lzzq+ehWicRIq2PFcs8o3iQMfENiv
++qILJ+xjcrvms5ZPDNahWkfRx3KCg8Q+/at2n5p7XYjMPYiLKHnDC+RE2b1qT20IZ
++FhuK/fTWLmKbfYFNNga6GC4qcaZJ7x0pbm4SDTYp0tkhzcHzwKhidfNB5J2vNz6l
++Ur6wiYwamFTLqcOwWo7rdvI+sSn05WQBv0QZlzFX+OAu0l7WQ7yU+noOxBhjvHds
++14+r9qcQZg2q9kG+evopYZqYXRUNNlZKo9MRBXhfrISulFAc5lRFQIXMXnglvAu+
++Ipz2gomEAOcOPNNVldhKAU94GAMJd/KfN0ZP7gX3YvPzuYU6XDhag5RTohXLm18w
++5AF+ES3DOQ6ixu3DTf0D+6qrDuK+prdX8ivcdTQVNOQ+MIZeGSc6NWWOTaMGJ3lg
++aZIxJUGdo6E7GBGiC1YTjgFKFbHzek1LRTh/LX3vbSudxwaG0HQxwsU9T4DWiMqa
++Fkf2KteLEUA6HrR+0XlAZrhwoqAmrJ+8lCFX3V0gE9lpENfVHlFXDGyx10DpTB28
++DdjnY3F7EPWNzwf9P3oNT69CKW3Bk6VVr3ROOJtDxVu1ioWo3TaXltQ0VOnap2Pu
++sa5wfrpfwBDuAS9JCDg4ttNp2nW3F7tgXC6xPqw5pvGwUppEw9XNrqV8TZrxduuv
++rQ3NyZ7KSzIpmFlD3UwV/fGfz3UQmHS6Ng1evrUID9DjfYNfRqSGIGjDfxGtYD+j
++Z1gLJZuhjJpNtwBkKRtlNtrCWCJK2hidK/foxwD7kwAPo2I9FjpltxCRywZUs07X
++KwXTfBR9v6ij1LV6K58hFS+8ezZyZ05CeVBFkMQdclTOSfuPxlMkQOtjp8QWDj+F
++j/MYziT5KBkHvcbrjdRtUJIAi4N7zCsPZtjik918AK1WBNRVqPbrgq/XSEXMfuvs
++6JbfK0B76vdBDRtJFC1JsvnIrGbUztxXzyQwFLaR/AjVJqpVlysLWzPKWVX6/+SJ
++u1NQOl2E8P6ycyBsuGnO89p0S4F8cMRcI2X1XQsZ7/q0NBrOMaEp5T3SrWo9GiQ3
++o2SBdbs3Y6MBPBtTu977Z/0RO63J3M5i2tjUiDfrFy7+VRLKr7qQ7JibohyB8QaR
++9tedgjn2f+of7PnP/PEl1cCphUZeHM7QKUMPT8dbqwmKtlYY43EHXcvNOT5IBk3X
++9lwJoZk/B2i+ZMRNSP34ztAwtxmasPt6RAWGQpWCn9qmttAHAnMfDqe7F7jVR6rS
++u58=
++-----END CERTIFICATE-----`
++
++func TestHandshakeRSATooBig(t *testing.T) {
++ testCert, _ := pem.Decode([]byte(largeRSAKeyCertPEM))
++
++ c := &Conn{conn: &discardConn{}, config: testConfig.Clone()}
++
++ expectedErr := "tls: server sent certificate containing RSA key larger than 8192 bits"
++ err := c.verifyServerCertificate([][]byte{testCert.Bytes})
++ if err == nil || err.Error() != expectedErr {
++ t.Errorf("Conn.verifyServerCertificate unexpected error: want %q, got %q", expectedErr, err)
++ }
++
++ expectedErr = "tls: client sent certificate containing RSA key larger than 8192 bits"
++ err = c.processCertsFromClient(Certificate{Certificate: [][]byte{testCert.Bytes}})
++ if err == nil || err.Error() != expectedErr {
++ t.Errorf("Conn.processCertsFromClient unexpected error: want %q, got %q", expectedErr, err)
++ }
++}
+diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go
+index b16415a..2e36840 100644
+--- a/src/crypto/tls/handshake_server.go
++++ b/src/crypto/tls/handshake_server.go
+@@ -738,6 +738,10 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error {
+ c.sendAlert(alertBadCertificate)
+ return errors.New("tls: failed to parse client certificate: " + err.Error())
+ }
++ if certs[i].PublicKeyAlgorithm == x509.RSA && certs[i].PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
++ c.sendAlert(alertBadCertificate)
++ return fmt.Errorf("tls: client sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
++ }
+ }
+
+ if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
+--
+2.25.1
+
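The check added above can be reproduced outside crypto/tls; the sketch below applies the same rule to a parsed certificate. maxRSAKeySize mirrors the constant from the patch, while checkRSAKeySize is a name chosen here for illustration, not an API in the Go standard library.

package main

import (
	"crypto/rsa"
	"crypto/x509"
	"fmt"
)

// maxRSAKeySize mirrors the limit introduced by the patch above.
const maxRSAKeySize = 8192

// checkRSAKeySize rejects DER certificates whose RSA public key modulus is
// larger than maxRSAKeySize bits, as verifyServerCertificate and
// processCertsFromClient now do during the handshake.
func checkRSAKeySize(der []byte) error {
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		return err
	}
	if cert.PublicKeyAlgorithm == x509.RSA {
		if pub, ok := cert.PublicKey.(*rsa.PublicKey); ok && pub.N.BitLen() > maxRSAKeySize {
			return fmt.Errorf("certificate contains RSA key larger than %d bits", maxRSAKeySize)
		}
	}
	return nil
}

func main() {
	// A nil/garbage input simply fails to parse; a real chain would be
	// checked certificate by certificate, as in the patch.
	fmt.Println(checkRSAKeySize(nil))
}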
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-39318.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-39318.patch
new file mode 100644
index 0000000000..00def8fcda
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-39318.patch
@@ -0,0 +1,262 @@
+From 023b542edf38e2a1f87fcefb9f75ff2f99401b4c Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 3 Aug 2023 12:24:13 -0700
+Subject: [PATCH] [release-branch.go1.20] html/template: support HTML-like
+ comments in script contexts
+
+Per Appendix B.1.1 of the ECMAScript specification, support HTML-like
+comments in script contexts. Also per section 12.5, support hashbang
+comments. This brings our parsing in-line with how browsers treat these
+comment types.
+
+Thanks to Takeshi Kaneko (GMO Cybersecurity by Ierae, Inc.) for
+reporting this issue.
+
+Fixes #62196
+Fixes #62395
+Fixes CVE-2023-39318
+
+Change-Id: Id512702c5de3ae46cf648e268cb10e1eb392a181
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1976593
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2014620
+Reviewed-on: https://go-review.googlesource.com/c/go/+/526098
+Run-TryBot: Cherry Mui <cherryyz@google.com>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/023b542edf38e2a1f87fcefb9f75ff2f99401b4c]
+CVE: CVE-2023-39318
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/html/template/context.go | 6 ++-
+ src/html/template/escape.go | 5 +-
+ src/html/template/escape_test.go | 10 ++++
+ src/html/template/state_string.go | 26 +++++-----
+ src/html/template/transition.go | 80 ++++++++++++++++++++-----------
+ 5 files changed, 84 insertions(+), 43 deletions(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index 0b65313..4eb7891 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -124,6 +124,10 @@ const (
+ stateJSBlockCmt
+ // stateJSLineCmt occurs inside a JavaScript // line comment.
+ stateJSLineCmt
++ // stateJSHTMLOpenCmt occurs inside a JavaScript <!-- HTML-like comment.
++ stateJSHTMLOpenCmt
++ // stateJSHTMLCloseCmt occurs inside a JavaScript --> HTML-like comment.
++ stateJSHTMLCloseCmt
+ // stateCSS occurs inside a <style> element or style attribute.
+ stateCSS
+ // stateCSSDqStr occurs inside a CSS double quoted string.
+@@ -149,7 +153,7 @@ const (
+ // authors & maintainers, not for end-users or machines.
+ func isComment(s state) bool {
+ switch s {
+- case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateCSSBlockCmt, stateCSSLineCmt:
++ case stateHTMLCmt, stateJSBlockCmt, stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt, stateCSSBlockCmt, stateCSSLineCmt:
+ return true
+ }
+ return false
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index 435f912..ad2ec69 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -698,9 +698,12 @@ func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+ if c.state != c1.state && isComment(c1.state) && c1.delim == delimNone {
+ // Preserve the portion between written and the comment start.
+ cs := i1 - 2
+- if c1.state == stateHTMLCmt {
++ if c1.state == stateHTMLCmt || c1.state == stateJSHTMLOpenCmt {
+ // "<!--" instead of "/*" or "//"
+ cs -= 2
++ } else if c1.state == stateJSHTMLCloseCmt {
++ // "-->" instead of "/*" or "//"
++ cs -= 1
+ }
+ b.Write(s[written:cs])
+ written = i1
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index f550691..5f41e52 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -503,6 +503,16 @@ func TestEscape(t *testing.T) {
+ "<script>var a/*b*///c\nd</script>",
+ "<script>var a \nd</script>",
+ },
++ {
++ "JS HTML-like comments",
++ "<script>before <!-- beep\nbetween\nbefore-->boop\n</script>",
++ "<script>before \nbetween\nbefore\n</script>",
++ },
++ {
++ "JS hashbang comment",
++ "<script>#! beep\n</script>",
++ "<script>\n</script>",
++ },
+ {
+ "CSS comments",
+ "<style>p// paragraph\n" +
+diff --git a/src/html/template/state_string.go b/src/html/template/state_string.go
+index 05104be..b5cfe70 100644
+--- a/src/html/template/state_string.go
++++ b/src/html/template/state_string.go
+@@ -25,21 +25,23 @@ func _() {
+ _ = x[stateJSRegexp-14]
+ _ = x[stateJSBlockCmt-15]
+ _ = x[stateJSLineCmt-16]
+- _ = x[stateCSS-17]
+- _ = x[stateCSSDqStr-18]
+- _ = x[stateCSSSqStr-19]
+- _ = x[stateCSSDqURL-20]
+- _ = x[stateCSSSqURL-21]
+- _ = x[stateCSSURL-22]
+- _ = x[stateCSSBlockCmt-23]
+- _ = x[stateCSSLineCmt-24]
+- _ = x[stateError-25]
+- _ = x[stateDead-26]
++ _ = x[stateJSHTMLOpenCmt-17]
++ _ = x[stateJSHTMLCloseCmt-18]
++ _ = x[stateCSS-19]
++ _ = x[stateCSSDqStr-20]
++ _ = x[stateCSSSqStr-21]
++ _ = x[stateCSSDqURL-22]
++ _ = x[stateCSSSqURL-23]
++ _ = x[stateCSSURL-24]
++ _ = x[stateCSSBlockCmt-25]
++ _ = x[stateCSSLineCmt-26]
++ _ = x[stateError-27]
++ _ = x[stateDead-28]
+ }
+
+-const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
++const _state_name = "stateTextstateTagstateAttrNamestateAfterNamestateBeforeValuestateHTMLCmtstateRCDATAstateAttrstateURLstateSrcsetstateJSstateJSDqStrstateJSSqStrstateJSBqStrstateJSRegexpstateJSBlockCmtstateJSLineCmtstateJSHTMLOpenCmtstateJSHTMLCloseCmtstateCSSstateCSSDqStrstateCSSSqStrstateCSSDqURLstateCSSSqURLstateCSSURLstateCSSBlockCmtstateCSSLineCmtstateErrorstateDead"
+
+-var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 204, 217, 230, 243, 256, 267, 283, 298, 308, 317}
++var _state_index = [...]uint16{0, 9, 17, 30, 44, 60, 72, 83, 92, 100, 111, 118, 130, 142, 154, 167, 182, 196, 214, 233, 241, 254, 267, 280, 293, 304, 320, 335, 345, 354}
+
+ func (i state) String() string {
+ if i >= state(len(_state_index)-1) {
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index 92eb351..12aa4c4 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -14,32 +14,34 @@ import (
+ // the updated context and the number of bytes consumed from the front of the
+ // input.
+ var transitionFunc = [...]func(context, []byte) (context, int){
+- stateText: tText,
+- stateTag: tTag,
+- stateAttrName: tAttrName,
+- stateAfterName: tAfterName,
+- stateBeforeValue: tBeforeValue,
+- stateHTMLCmt: tHTMLCmt,
+- stateRCDATA: tSpecialTagEnd,
+- stateAttr: tAttr,
+- stateURL: tURL,
+- stateSrcset: tURL,
+- stateJS: tJS,
+- stateJSDqStr: tJSDelimited,
+- stateJSSqStr: tJSDelimited,
+- stateJSBqStr: tJSDelimited,
+- stateJSRegexp: tJSDelimited,
+- stateJSBlockCmt: tBlockCmt,
+- stateJSLineCmt: tLineCmt,
+- stateCSS: tCSS,
+- stateCSSDqStr: tCSSStr,
+- stateCSSSqStr: tCSSStr,
+- stateCSSDqURL: tCSSStr,
+- stateCSSSqURL: tCSSStr,
+- stateCSSURL: tCSSStr,
+- stateCSSBlockCmt: tBlockCmt,
+- stateCSSLineCmt: tLineCmt,
+- stateError: tError,
++ stateText: tText,
++ stateTag: tTag,
++ stateAttrName: tAttrName,
++ stateAfterName: tAfterName,
++ stateBeforeValue: tBeforeValue,
++ stateHTMLCmt: tHTMLCmt,
++ stateRCDATA: tSpecialTagEnd,
++ stateAttr: tAttr,
++ stateURL: tURL,
++ stateSrcset: tURL,
++ stateJS: tJS,
++ stateJSDqStr: tJSDelimited,
++ stateJSSqStr: tJSDelimited,
++ stateJSBqStr: tJSDelimited,
++ stateJSRegexp: tJSDelimited,
++ stateJSBlockCmt: tBlockCmt,
++ stateJSLineCmt: tLineCmt,
++ stateJSHTMLOpenCmt: tLineCmt,
++ stateJSHTMLCloseCmt: tLineCmt,
++ stateCSS: tCSS,
++ stateCSSDqStr: tCSSStr,
++ stateCSSSqStr: tCSSStr,
++ stateCSSDqURL: tCSSStr,
++ stateCSSSqURL: tCSSStr,
++ stateCSSURL: tCSSStr,
++ stateCSSBlockCmt: tBlockCmt,
++ stateCSSLineCmt: tLineCmt,
++ stateError: tError,
+ }
+
+ var commentStart = []byte("<!--")
+@@ -263,7 +265,7 @@ func tURL(c context, s []byte) (context, int) {
+
+ // tJS is the context transition function for the JS state.
+ func tJS(c context, s []byte) (context, int) {
+- i := bytes.IndexAny(s, "\"`'/")
++ i := bytes.IndexAny(s, "\"`'/<-#")
+ if i == -1 {
+ // Entire input is non string, comment, regexp tokens.
+ c.jsCtx = nextJSCtx(s, c.jsCtx)
+@@ -293,6 +295,26 @@ func tJS(c context, s []byte) (context, int) {
+ err: errorf(ErrSlashAmbig, nil, 0, "'/' could start a division or regexp: %.32q", s[i:]),
+ }, len(s)
+ }
++ // ECMAScript supports HTML style comments for legacy reasons, see Appendix
++ // B.1.1 "HTML-like Comments". The handling of these comments is somewhat
++ // confusing. Multi-line comments are not supported, i.e. anything on lines
++ // between the opening and closing tokens is not considered a comment, but
++ // anything following the opening or closing token, on the same line, is
++ // ignored. As such we simply treat any line prefixed with "<!--" or "-->"
++ // as if it were actually prefixed with "//" and move on.
++ case '<':
++ if i+3 < len(s) && bytes.Equal(commentStart, s[i:i+4]) {
++ c.state, i = stateJSHTMLOpenCmt, i+3
++ }
++ case '-':
++ if i+2 < len(s) && bytes.Equal(commentEnd, s[i:i+3]) {
++ c.state, i = stateJSHTMLCloseCmt, i+2
++ }
++ // ECMAScript also supports "hashbang" comment lines, see Section 12.5.
++ case '#':
++ if i+1 < len(s) && s[i+1] == '!' {
++ c.state, i = stateJSLineCmt, i+1
++ }
+ default:
+ panic("unreachable")
+ }
+@@ -372,12 +394,12 @@ func tBlockCmt(c context, s []byte) (context, int) {
+ return c, i + 2
+ }
+
+-// tLineCmt is the context transition function for //comment states.
++// tLineCmt is the context transition function for //comment states, and the JS HTML-like comment state.
+ func tLineCmt(c context, s []byte) (context, int) {
+ var lineTerminators string
+ var endState state
+ switch c.state {
+- case stateJSLineCmt:
++ case stateJSLineCmt, stateJSHTMLOpenCmt, stateJSHTMLCloseCmt:
+ lineTerminators, endState = "\n\r\u2028\u2029", stateJS
+ case stateCSSLineCmt:
+ lineTerminators, endState = "\n\f\r", stateCSS
+--
+2.24.4
+
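The new escape_test.go cases above double as a usage illustration: on a toolchain carrying this backport, text following an HTML-like comment opener or closer inside a <script> element is elided the same way a "//" comment would be. A small sketch, assuming the patched html/template:

package main

import (
	"html/template"
	"os"
)

func main() {
	// Mirrors the "JS HTML-like comments" case added to escape_test.go.
	const src = "<script>before <!-- beep\nbetween\nbefore-->boop\n</script>"
	t := template.Must(template.New("t").Parse(src))
	// With the fix applied this prints:
	//   <script>before
	//   between
	//   before
	//   </script>
	// i.e. the comment bodies after "<!--" and "-->" are dropped.
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}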
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-39319.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-39319.patch
new file mode 100644
index 0000000000..69106e3e05
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-39319.patch
@@ -0,0 +1,230 @@
+From 2070531d2f53df88e312edace6c8dfc9686ab2f5 Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Thu, 3 Aug 2023 12:28:28 -0700
+Subject: [PATCH] [release-branch.go1.20] html/template: properly handle
+ special tags within the script context
+
+The HTML specification has incredibly complex rules for how to handle
+"<!--", "<script", and "</script" when they appear within literals in
+the script context. Rather than attempting to apply these restrictions
+(which require a significantly more complex state machine) we apply
+the workaround suggested in section 4.12.1.3 of the HTML specification [1].
+
+More precisely, when "<!--", "<script", and "</script" appear within
+literals (strings and regular expressions, ignoring comments since we
+already elide their content) we replace the "<" with "\x3C". This avoids
+the unintuitive behavior that using these tags within literals can cause,
+by simply preventing the rendered content from triggering it. This may
+break some correct usages of these tags, but on balance is more likely
+to prevent XSS attacks where users are unknowingly either closing or not
+closing the script blocks where they think they are.
+
+Thanks to Takeshi Kaneko (GMO Cybersecurity by Ierae, Inc.) for
+reporting this issue.
+
+Fixes #62197
+Fixes #62397
+Fixes CVE-2023-39319
+
+[1] https://html.spec.whatwg.org/#restrictions-for-contents-of-script-elements
+
+Change-Id: Iab57b0532694827e3eddf57a7497ba1fab1746dc
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/1976594
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2014621
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/526099
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Run-TryBot: Cherry Mui <cherryyz@google.com>
+
+Upstream-Status: Backport from [https://github.com/golang/go/commit/2070531d2f53df88e312edace6c8dfc9686ab2f5]
+CVE: CVE-2023-39319
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/html/template/context.go | 14 ++++++++++
+ src/html/template/escape.go | 26 ++++++++++++++++++
+ src/html/template/escape_test.go | 47 +++++++++++++++++++++++++++++++-
+ src/html/template/transition.go | 15 ++++++++++
+ 4 files changed, 101 insertions(+), 1 deletion(-)
+
+diff --git a/src/html/template/context.go b/src/html/template/context.go
+index 4eb7891..feb6517 100644
+--- a/src/html/template/context.go
++++ b/src/html/template/context.go
+@@ -168,6 +168,20 @@ func isInTag(s state) bool {
+ return false
+ }
+
++// isInScriptLiteral returns true if s is one of the literal states within a
++// <script> tag, and as such occurrences of "<!--", "<script", and "</script"
++// need to be treated specially.
++func isInScriptLiteral(s state) bool {
++ // Ignore the comment states (stateJSBlockCmt, stateJSLineCmt,
++ // stateJSHTMLOpenCmt, stateJSHTMLCloseCmt) because their content is already
++ // omitted from the output.
++ switch s {
++ case stateJSDqStr, stateJSSqStr, stateJSBqStr, stateJSRegexp:
++ return true
++ }
++ return false
++}
++
+ // delim is the delimiter that will end the current HTML attribute.
+ type delim uint8
+
+diff --git a/src/html/template/escape.go b/src/html/template/escape.go
+index ad2ec69..de8cf6f 100644
+--- a/src/html/template/escape.go
++++ b/src/html/template/escape.go
+@@ -10,6 +10,7 @@ import (
+ "html"
+ "internal/godebug"
+ "io"
++ "regexp"
+ "text/template"
+ "text/template/parse"
+ )
+@@ -650,6 +651,26 @@ var delimEnds = [...]string{
+ delimSpaceOrTagEnd: " \t\n\f\r>",
+ }
+
++var (
++ // Per WHATWG HTML specification, section 4.12.1.3, there are extremely
++ // complicated rules for how to handle the set of opening tags <!--,
++ // <script, and </script when they appear in JS literals (i.e. strings,
++ // regexs, and comments). The specification suggests a simple solution,
++ // rather than implementing the arcane ABNF, which involves simply escaping
++ // the opening bracket with \x3C. We use the below regex for this, since it
++ // makes doing the case-insensitive find-replace much simpler.
++ specialScriptTagRE = regexp.MustCompile("(?i)<(script|/script|!--)")
++ specialScriptTagReplacement = []byte("\\x3C$1")
++)
++
++func containsSpecialScriptTag(s []byte) bool {
++ return specialScriptTagRE.Match(s)
++}
++
++func escapeSpecialScriptTags(s []byte) []byte {
++ return specialScriptTagRE.ReplaceAll(s, specialScriptTagReplacement)
++}
++
+ var doctypeBytes = []byte("<!DOCTYPE")
+
+ // escapeText escapes a text template node.
+@@ -708,6 +729,11 @@ func (e *escaper) escapeText(c context, n *parse.TextNode) context {
+ b.Write(s[written:cs])
+ written = i1
+ }
++ if isInScriptLiteral(c.state) && containsSpecialScriptTag(s[i:i1]) {
++ b.Write(s[written:i])
++ b.Write(escapeSpecialScriptTags(s[i:i1]))
++ written = i1
++ }
+ if i == i1 && c.state == c1.state {
+ panic(fmt.Sprintf("infinite loop from %v to %v on %q..%q", c, c1, s[:i], s[i:]))
+ }
+diff --git a/src/html/template/escape_test.go b/src/html/template/escape_test.go
+index 5f41e52..0cacb20 100644
+--- a/src/html/template/escape_test.go
++++ b/src/html/template/escape_test.go
+@@ -513,6 +513,21 @@ func TestEscape(t *testing.T) {
+ "<script>#! beep\n</script>",
+ "<script>\n</script>",
+ },
++ {
++ "Special tags in <script> string literals",
++ `<script>var a = "asd < 123 <!-- 456 < fgh <script jkl < 789 </script"</script>`,
++ `<script>var a = "asd < 123 \x3C!-- 456 < fgh \x3Cscript jkl < 789 \x3C/script"</script>`,
++ },
++ {
++ "Special tags in <script> string literals (mixed case)",
++ `<script>var a = "<!-- <ScripT </ScripT"</script>`,
++ `<script>var a = "\x3C!-- \x3CScripT \x3C/ScripT"</script>`,
++ },
++ {
++ "Special tags in <script> regex literals (mixed case)",
++ `<script>var a = /<!-- <ScripT </ScripT/</script>`,
++ `<script>var a = /\x3C!-- \x3CScripT \x3C/ScripT/</script>`,
++ },
+ {
+ "CSS comments",
+ "<style>p// paragraph\n" +
+@@ -1501,8 +1516,38 @@ func TestEscapeText(t *testing.T) {
+ context{state: stateJS, element: elementScript},
+ },
+ {
++ // <script and </script tags are escaped, so </script> should not
++ // cause us to exit the JS state.
+ `<script>document.write("<script>alert(1)</script>");`,
+- context{state: stateText},
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)</script>`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)<!--`,
++ context{state: stateJSDqStr, element: elementScript},
++ },
++ {
++ `<script>document.write("<script>alert(1)</Script>");`,
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>document.write("<!--");`,
++ context{state: stateJS, element: elementScript},
++ },
++ {
++ `<script>let a = /</script`,
++ context{state: stateJSRegexp, element: elementScript},
++ },
++ {
++ `<script>let a = /</script/`,
++ context{state: stateJS, element: elementScript, jsCtx: jsCtxDivOp},
+ },
+ {
+ `<script type="text/template">`,
+diff --git a/src/html/template/transition.go b/src/html/template/transition.go
+index 12aa4c4..3d2a37c 100644
+--- a/src/html/template/transition.go
++++ b/src/html/template/transition.go
+@@ -214,6 +214,11 @@ var (
+ // element states.
+ func tSpecialTagEnd(c context, s []byte) (context, int) {
+ if c.element != elementNone {
++ // script end tags ("</script") within script literals are ignored, so that
++ // we can properly escape them.
++ if c.element == elementScript && (isInScriptLiteral(c.state) || isComment(c.state)) {
++ return c, len(s)
++ }
+ if i := indexTagEnd(s, specialTagEndMarkers[c.element]); i != -1 {
+ return context{}, i
+ }
+@@ -353,6 +358,16 @@ func tJSDelimited(c context, s []byte) (context, int) {
+ inCharset = true
+ case ']':
+ inCharset = false
++ case '/':
++ // If "</script" appears in a regex literal, the '/' should not
++ // close the regex literal, and it will later be escaped to
++ // "\x3C/script" in escapeText.
++ if i > 0 && i+7 <= len(s) && bytes.Compare(bytes.ToLower(s[i-1:i+7]), []byte("</script")) == 0 {
++ i++
++ } else if !inCharset {
++ c.state, c.jsCtx = stateJS, jsCtxDivOp
++ return c, i + 1
++ }
+ default:
+ // end delimiter
+ if !inCharset {
+--
+2.24.4
+
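The core of this fix is the case-insensitive find-and-replace added to escape.go above. The same regexp can be exercised on its own; the snippet below reuses the pattern and replacement verbatim and reproduces the first new escape_test.go case.

package main

import (
	"fmt"
	"regexp"
)

// Pattern and replacement as added to escape.go: the "<" of "<!--",
// "<script" and "</script" becomes "\x3C" when found inside JS literals.
var (
	specialScriptTagRE          = regexp.MustCompile("(?i)<(script|/script|!--)")
	specialScriptTagReplacement = []byte("\\x3C$1")
)

func main() {
	in := []byte(`var a = "asd < 123 <!-- 456 < fgh <script jkl < 789 </script"`)
	out := specialScriptTagRE.ReplaceAll(in, specialScriptTagReplacement)
	// Prints: var a = "asd < 123 \x3C!-- 456 < fgh \x3Cscript jkl < 789 \x3C/script"
	// Lone "<" characters are left untouched.
	fmt.Println(string(out))
}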
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-39326.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-39326.patch
new file mode 100644
index 0000000000..998af361e8
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-39326.patch
@@ -0,0 +1,181 @@
+From 6446af942e2e2b161c4ec1b60d9703a2b55dc4dd Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Tue, 7 Nov 2023 10:47:56 -0800
+Subject: [PATCH] [release-branch.go1.20] net/http: limit chunked data overhead
+
+The chunked transfer encoding adds some overhead to
+the content transferred. When writing one byte per
+chunk, for example, there are five bytes of overhead
+per byte of data transferred: "1\r\nX\r\n" to send "X".
+
+Chunks may include "chunk extensions",
+which we skip over and do not use.
+For example: "1;chunk extension here\r\nX\r\n".
+
+A malicious sender can use chunk extensions to add
+about 4k of overhead per byte of data.
+(The maximum chunk header line size we will accept.)
+
+Track the amount of overhead read in chunked data,
+and produce an error if it seems excessive.
+
+Updates #64433
+Fixes #64434
+Fixes CVE-2023-39326
+
+Change-Id: I40f8d70eb6f9575fb43f506eb19132ccedafcf39
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2076135
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+(cherry picked from commit 3473ae72ee66c60744665a24b2fde143e8964d4f)
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2095407
+Run-TryBot: Roland Shoemaker <bracewell@google.com>
+TryBot-Result: Security TryBots <security-trybots@go-security-trybots.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/547355
+Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/6446af942e2e2b161c4ec1b60d9703a2b55dc4dd]
+CVE: CVE-2023-39326
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/net/http/internal/chunked.go | 36 +++++++++++++---
+ src/net/http/internal/chunked_test.go | 59 +++++++++++++++++++++++++++
+ 2 files changed, 89 insertions(+), 6 deletions(-)
+
+diff --git a/src/net/http/internal/chunked.go b/src/net/http/internal/chunked.go
+index f06e572..ddbaacb 100644
+--- a/src/net/http/internal/chunked.go
++++ b/src/net/http/internal/chunked.go
+@@ -39,7 +39,8 @@ type chunkedReader struct {
+ n uint64 // unread bytes in chunk
+ err error
+ buf [2]byte
+- checkEnd bool // whether need to check for \r\n chunk footer
++ checkEnd bool // whether need to check for \r\n chunk footer
++ excess int64 // "excessive" chunk overhead, for malicious sender detection
+ }
+
+ func (cr *chunkedReader) beginChunk() {
+@@ -49,10 +50,38 @@ func (cr *chunkedReader) beginChunk() {
+ if cr.err != nil {
+ return
+ }
++ cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
++ line = trimTrailingWhitespace(line)
++ line, cr.err = removeChunkExtension(line)
++ if cr.err != nil {
++ return
++ }
+ cr.n, cr.err = parseHexUint(line)
+ if cr.err != nil {
+ return
+ }
++ // A sender who sends one byte per chunk will send 5 bytes of overhead
++ // for every byte of data. ("1\r\nX\r\n" to send "X".)
++ // We want to allow this, since streaming a byte at a time can be legitimate.
++ //
++ // A sender can use chunk extensions to add arbitrary amounts of additional
++ // data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
++ // We don't want to disallow extensions (although we discard them),
++ // but we also don't want to allow a sender to reduce the signal/noise ratio
++ // arbitrarily.
++ //
++ // We track the amount of excess overhead read,
++ // and produce an error if it grows too large.
++ //
++ // Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
++ // plus twice the amount of real data in the chunk.
++ cr.excess -= 16 + (2 * int64(cr.n))
++ if cr.excess < 0 {
++ cr.excess = 0
++ }
++ if cr.excess > 16*1024 {
++ cr.err = errors.New("chunked encoding contains too much non-data")
++ }
+ if cr.n == 0 {
+ cr.err = io.EOF
+ }
+@@ -133,11 +162,6 @@ func readChunkLine(b *bufio.Reader) ([]byte, error) {
+ if len(p) >= maxLineLength {
+ return nil, ErrLineTooLong
+ }
+- p = trimTrailingWhitespace(p)
+- p, err = removeChunkExtension(p)
+- if err != nil {
+- return nil, err
+- }
+ return p, nil
+ }
+
+diff --git a/src/net/http/internal/chunked_test.go b/src/net/http/internal/chunked_test.go
+index d067165..b20747d 100644
+--- a/src/net/http/internal/chunked_test.go
++++ b/src/net/http/internal/chunked_test.go
+@@ -212,3 +212,62 @@ func TestChunkReadPartial(t *testing.T) {
+ }
+
+ }
++
++func TestChunkReaderTooMuchOverhead(t *testing.T) {
++ // If the sender is sending 100x as many chunk header bytes as chunk data,
++ // we should reject the stream at some point.
++ chunk := []byte("1;")
++ for i := 0; i < 100; i++ {
++ chunk = append(chunk, 'a') // chunk extension
++ }
++ chunk = append(chunk, "\r\nX\r\n"...)
++ const bodylen = 1 << 20
++ r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
++ if i < bodylen {
++ return chunk, nil
++ }
++ return []byte("0\r\n"), nil
++ }})
++ _, err := io.ReadAll(r)
++ if err == nil {
++ t.Fatalf("successfully read body with excessive overhead; want error")
++ }
++}
++
++func TestChunkReaderByteAtATime(t *testing.T) {
++ // Sending one byte per chunk should not trip the excess-overhead detection.
++ const bodylen = 1 << 20
++ r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
++ if i < bodylen {
++ return []byte("1\r\nX\r\n"), nil
++ }
++ return []byte("0\r\n"), nil
++ }})
++ got, err := io.ReadAll(r)
++ if err != nil {
++ t.Errorf("unexpected error: %v", err)
++ }
++ if len(got) != bodylen {
++ t.Errorf("read %v bytes, want %v", len(got), bodylen)
++ }
++}
++
++type funcReader struct {
++ f func(iteration int) ([]byte, error)
++ i int
++ b []byte
++ err error
++}
++
++func (r *funcReader) Read(p []byte) (n int, err error) {
++ if len(r.b) == 0 && r.err == nil {
++ r.b, r.err = r.f(r.i)
++ r.i++
++ }
++ n = copy(p, r.b)
++ r.b = r.b[n:]
++ if len(r.b) > 0 {
++ return n, nil
++ }
++ return n, r.err
++}
+--
+2.25.1
+
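The accounting added to beginChunk above boils down to simple arithmetic: each chunk is charged its header line plus the CRLF after the data, credited 16 bytes plus twice its data length, and the reader fails once the running excess exceeds 16 KiB. A standalone sketch of that rule (the type and method names here are illustrative, not part of net/http):

package main

import (
	"errors"
	"fmt"
)

// excessTracker mimics the overhead accounting added to chunkedReader.
type excessTracker struct{ excess int64 }

// add records one chunk: headerLen bytes of chunk-header overhead and
// dataLen bytes of actual data.
func (t *excessTracker) add(headerLen, dataLen int64) error {
	t.excess += headerLen + 2 // header line, plus \r\n after the chunk data
	t.excess -= 16 + 2*dataLen
	if t.excess < 0 {
		t.excess = 0
	}
	if t.excess > 16*1024 {
		return errors.New("chunked encoding contains too much non-data")
	}
	return nil
}

func main() {
	// One data byte per chunk with a ~100-byte chunk extension, as in
	// TestChunkReaderTooMuchOverhead: roughly 86 bytes of excess accumulate
	// per chunk, so the 16 KiB budget runs out after a couple hundred chunks.
	var t excessTracker
	for i := 1; ; i++ {
		if err := t.add(102, 1); err != nil {
			fmt.Println("rejected at chunk", i, "-", err)
			return
		}
	}
}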
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre1.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre1.patch
new file mode 100644
index 0000000000..4d65180253
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre1.patch
@@ -0,0 +1,393 @@
+From 9baafabac9a84813a336f068862207d2bb06d255 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Wed, 1 Apr 2020 17:25:40 -0400
+Subject: [PATCH] crypto/rsa: refactor RSA-PSS signing and verification
+
+Cleaned up for readability and consistency.
+
+There is one tiny behavioral change: when PSSSaltLengthEqualsHash is
+used and both hash and opts.Hash were set, hash.Size() was used for the
+salt length instead of opts.Hash.Size(). That's clearly wrong because
+opts.Hash is documented to override hash.
+
+Change-Id: I3e25dad933961eac827c6d2e3bbfe45fc5a6fb0e
+Reviewed-on: https://go-review.googlesource.com/c/go/+/226937
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Gobot Gobot <gobot@golang.org>
+Reviewed-by: Katie Hockman <katie@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/9baafabac9a84813a336f068862207d2bb06d255]
+CVE: CVE-2023-45287 #Dependency Patch1
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/rsa/pss.go | 173 ++++++++++++++++++++++--------------------
+ src/crypto/rsa/rsa.go | 9 ++-
+ 2 files changed, 96 insertions(+), 86 deletions(-)
+
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index 3ff0c2f4d0076..f9844d87329a8 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -4,9 +4,7 @@
+
+ package rsa
+
+-// This file implements the PSS signature scheme [1].
+-//
+-// [1] https://www.emc.com/collateral/white-papers/h11300-pkcs-1v2-2-rsa-cryptography-standard-wp.pdf
++// This file implements the RSASSA-PSS signature scheme according to RFC 8017.
+
+ import (
+ "bytes"
+@@ -17,8 +15,22 @@ import (
+ "math/big"
+ )
+
++// Per RFC 8017, Section 9.1
++//
++// EM = MGF1 xor DB || H( 8*0x00 || mHash || salt ) || 0xbc
++//
++// where
++//
++// DB = PS || 0x01 || salt
++//
++// and PS can be empty so
++//
++// emLen = dbLen + hLen + 1 = psLen + sLen + hLen + 2
++//
++
+ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {
+- // See [1], section 9.1.1
++ // See RFC 8017, Section 9.1.1.
++
+ hLen := hash.Size()
+ sLen := len(salt)
+ emLen := (emBits + 7) / 8
+@@ -30,7 +42,7 @@ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byt
+ // 2. Let mHash = Hash(M), an octet string of length hLen.
+
+ if len(mHash) != hLen {
+- return nil, errors.New("crypto/rsa: input must be hashed message")
++ return nil, errors.New("crypto/rsa: input must be hashed with given hash")
+ }
+
+ // 3. If emLen < hLen + sLen + 2, output "encoding error" and stop.
+@@ -40,8 +52,9 @@ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byt
+ }
+
+ em := make([]byte, emLen)
+- db := em[:emLen-sLen-hLen-2+1+sLen]
+- h := em[emLen-sLen-hLen-2+1+sLen : emLen-1]
++ psLen := emLen - sLen - hLen - 2
++ db := em[:psLen+1+sLen]
++ h := em[psLen+1+sLen : emLen-1]
+
+ // 4. Generate a random octet string salt of length sLen; if sLen = 0,
+ // then salt is the empty string.
+@@ -69,8 +82,8 @@ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byt
+ // 8. Let DB = PS || 0x01 || salt; DB is an octet string of length
+ // emLen - hLen - 1.
+
+- db[emLen-sLen-hLen-2] = 0x01
+- copy(db[emLen-sLen-hLen-1:], salt)
++ db[psLen] = 0x01
++ copy(db[psLen+1:], salt)
+
+ // 9. Let dbMask = MGF(H, emLen - hLen - 1).
+ //
+@@ -81,47 +94,57 @@ func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byt
+ // 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
+ // maskedDB to zero.
+
+- db[0] &= (0xFF >> uint(8*emLen-emBits))
++ db[0] &= 0xff >> (8*emLen - emBits)
+
+ // 12. Let EM = maskedDB || H || 0xbc.
+- em[emLen-1] = 0xBC
++ em[emLen-1] = 0xbc
+
+ // 13. Output EM.
+ return em, nil
+ }
+
+ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
++ // See RFC 8017, Section 9.1.2.
++
++ hLen := hash.Size()
++ if sLen == PSSSaltLengthEqualsHash {
++ sLen = hLen
++ }
++ emLen := (emBits + 7) / 8
++ if emLen != len(em) {
++ return errors.New("rsa: internal error: inconsistent length")
++ }
++
+ // 1. If the length of M is greater than the input limitation for the
+ // hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
+ // and stop.
+ //
+ // 2. Let mHash = Hash(M), an octet string of length hLen.
+- hLen := hash.Size()
+ if hLen != len(mHash) {
+ return ErrVerification
+ }
+
+ // 3. If emLen < hLen + sLen + 2, output "inconsistent" and stop.
+- emLen := (emBits + 7) / 8
+ if emLen < hLen+sLen+2 {
+ return ErrVerification
+ }
+
+ // 4. If the rightmost octet of EM does not have hexadecimal value
+ // 0xbc, output "inconsistent" and stop.
+- if em[len(em)-1] != 0xBC {
++ if em[emLen-1] != 0xbc {
+ return ErrVerification
+ }
+
+ // 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
+ // let H be the next hLen octets.
+ db := em[:emLen-hLen-1]
+- h := em[emLen-hLen-1 : len(em)-1]
++ h := em[emLen-hLen-1 : emLen-1]
+
+ // 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in
+ // maskedDB are not all equal to zero, output "inconsistent" and
+ // stop.
+- if em[0]&(0xFF<<uint(8-(8*emLen-emBits))) != 0 {
++ var bitMask byte = 0xff >> (8*emLen - emBits)
++ if em[0] & ^bitMask != 0 {
+ return ErrVerification
+ }
+
+@@ -132,37 +155,30 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+
+ // 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB
+ // to zero.
+- db[0] &= (0xFF >> uint(8*emLen-emBits))
++ db[0] &= bitMask
+
++ // If we don't know the salt length, look for the 0x01 delimiter.
+ if sLen == PSSSaltLengthAuto {
+- FindSaltLength:
+- for sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {
+- switch db[emLen-hLen-sLen-2] {
+- case 1:
+- break FindSaltLength
+- case 0:
+- continue
+- default:
+- return ErrVerification
+- }
+- }
+- if sLen < 0 {
++ psLen := bytes.IndexByte(db, 0x01)
++ if psLen < 0 {
+ return ErrVerification
+ }
+- } else {
+- // 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
+- // or if the octet at position emLen - hLen - sLen - 1 (the leftmost
+- // position is "position 1") does not have hexadecimal value 0x01,
+- // output "inconsistent" and stop.
+- for _, e := range db[:emLen-hLen-sLen-2] {
+- if e != 0x00 {
+- return ErrVerification
+- }
+- }
+- if db[emLen-hLen-sLen-2] != 0x01 {
++ sLen = len(db) - psLen - 1
++ }
++
++ // 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
++ // or if the octet at position emLen - hLen - sLen - 1 (the leftmost
++ // position is "position 1") does not have hexadecimal value 0x01,
++ // output "inconsistent" and stop.
++ psLen := emLen - hLen - sLen - 2
++ for _, e := range db[:psLen] {
++ if e != 0x00 {
+ return ErrVerification
+ }
+ }
++ if db[psLen] != 0x01 {
++ return ErrVerification
++ }
+
+ // 11. Let salt be the last sLen octets of DB.
+ salt := db[len(db)-sLen:]
+@@ -181,19 +197,19 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+ h0 := hash.Sum(nil)
+
+ // 14. If H = H', output "consistent." Otherwise, output "inconsistent."
+- if !bytes.Equal(h0, h) {
++ if !bytes.Equal(h0, h) { // TODO: constant time?
+ return ErrVerification
+ }
+ return nil
+ }
+
+-// signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt.
++// signPSSWithSalt calculates the signature of hashed using PSS with specified salt.
+ // Note that hashed must be the result of hashing the input message using the
+ // given hash function. salt is a random sequence of bytes whose length will be
+ // later used to verify the signature.
+ func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) {
+- nBits := priv.N.BitLen()
+- em, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New())
++ emBits := priv.N.BitLen() - 1
++ em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
+ if err != nil {
+ return
+ }
+@@ -202,7 +218,7 @@ func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed,
+ if err != nil {
+ return
+ }
+- s = make([]byte, (nBits+7)/8)
++ s = make([]byte, priv.Size())
+ copyWithLeftPad(s, c.Bytes())
+ return
+ }
+@@ -223,16 +239,15 @@ type PSSOptions struct {
+ // PSSSaltLength constants.
+ SaltLength int
+
+- // Hash, if not zero, overrides the hash function passed to SignPSS.
+- // This is the only way to specify the hash function when using the
+- // crypto.Signer interface.
++ // Hash is the hash function used to generate the message digest. If not
++ // zero, it overrides the hash function passed to SignPSS. It's required
++ // when using PrivateKey.Sign.
+ Hash crypto.Hash
+ }
+
+-// HashFunc returns pssOpts.Hash so that PSSOptions implements
+-// crypto.SignerOpts.
+-func (pssOpts *PSSOptions) HashFunc() crypto.Hash {
+- return pssOpts.Hash
++// HashFunc returns opts.Hash so that PSSOptions implements crypto.SignerOpts.
++func (opts *PSSOptions) HashFunc() crypto.Hash {
++ return opts.Hash
+ }
+
+ func (opts *PSSOptions) saltLength() int {
+@@ -242,56 +257,50 @@ func (opts *PSSOptions) saltLength() int {
+ return opts.SaltLength
+ }
+
+-// SignPSS calculates the signature of hashed using RSASSA-PSS [1].
+-// Note that hashed must be the result of hashing the input message using the
+-// given hash function. The opts argument may be nil, in which case sensible
+-// defaults are used.
+-func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) ([]byte, error) {
++// SignPSS calculates the signature of digest using PSS.
++//
++// digest must be the result of hashing the input message using the given hash
++// function. The opts argument may be nil, in which case sensible defaults are
++// used. If opts.Hash is set, it overrides hash.
++func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error) {
++ if opts != nil && opts.Hash != 0 {
++ hash = opts.Hash
++ }
++
+ saltLength := opts.saltLength()
+ switch saltLength {
+ case PSSSaltLengthAuto:
+- saltLength = (priv.N.BitLen()+7)/8 - 2 - hash.Size()
++ saltLength = priv.Size() - 2 - hash.Size()
+ case PSSSaltLengthEqualsHash:
+ saltLength = hash.Size()
+ }
+
+- if opts != nil && opts.Hash != 0 {
+- hash = opts.Hash
+- }
+-
+ salt := make([]byte, saltLength)
+ if _, err := io.ReadFull(rand, salt); err != nil {
+ return nil, err
+ }
+- return signPSSWithSalt(rand, priv, hash, hashed, salt)
++ return signPSSWithSalt(rand, priv, hash, digest, salt)
+ }
+
+ // VerifyPSS verifies a PSS signature.
+-// hashed is the result of hashing the input message using the given hash
+-// function and sig is the signature. A valid signature is indicated by
+-// returning a nil error. The opts argument may be nil, in which case sensible
+-// defaults are used.
+-func VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error {
+- return verifyPSS(pub, hash, hashed, sig, opts.saltLength())
+-}
+-
+-// verifyPSS verifies a PSS signature with the given salt length.
+-func verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {
+- nBits := pub.N.BitLen()
+- if len(sig) != (nBits+7)/8 {
++//
++// A valid signature is indicated by returning a nil error. digest must be the
++// result of hashing the input message using the given hash function. The opts
++// argument may be nil, in which case sensible defaults are used. opts.Hash is
++// ignored.
++func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error {
++ if len(sig) != pub.Size() {
+ return ErrVerification
+ }
+ s := new(big.Int).SetBytes(sig)
+ m := encrypt(new(big.Int), pub, s)
+- emBits := nBits - 1
++ emBits := pub.N.BitLen() - 1
+ emLen := (emBits + 7) / 8
+- if emLen < len(m.Bytes()) {
++ emBytes := m.Bytes()
++ if emLen < len(emBytes) {
+ return ErrVerification
+ }
+ em := make([]byte, emLen)
+- copyWithLeftPad(em, m.Bytes())
+- if saltLen == PSSSaltLengthEqualsHash {
+- saltLen = hash.Size()
+- }
+- return emsaPSSVerify(hashed, em, emBits, saltLen, hash.New())
++ copyWithLeftPad(em, emBytes)
++ return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
+ }
+diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
+index 5a42990640164..b4bfa13defbdf 100644
+--- a/src/crypto/rsa/rsa.go
++++ b/src/crypto/rsa/rsa.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// Package rsa implements RSA encryption as specified in PKCS#1.
++// Package rsa implements RSA encryption as specified in PKCS#1 and RFC 8017.
+ //
+ // RSA is a single, fundamental operation that is used in this package to
+ // implement either public-key encryption or public-key signatures.
+@@ -10,13 +10,13 @@
+ // The original specification for encryption and signatures with RSA is PKCS#1
+ // and the terms "RSA encryption" and "RSA signatures" by default refer to
+ // PKCS#1 version 1.5. However, that specification has flaws and new designs
+-// should use version two, usually called by just OAEP and PSS, where
++// should use version 2, usually called by just OAEP and PSS, where
+ // possible.
+ //
+ // Two sets of interfaces are included in this package. When a more abstract
+ // interface isn't necessary, there are functions for encrypting/decrypting
+ // with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract
+-// over the public-key primitive, the PrivateKey struct implements the
++// over the public key primitive, the PrivateKey type implements the
+ // Decrypter and Signer interfaces from the crypto package.
+ //
+ // The RSA operations in this package are not implemented using constant-time algorithms.
+@@ -111,7 +111,8 @@ func (priv *PrivateKey) Public() crypto.PublicKey {
+
+ // Sign signs digest with priv, reading randomness from rand. If opts is a
+ // *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will
+-// be used.
++// be used. digest must be the result of hashing the input message using
++// opts.HashFunc().
+ //
+ // This method implements crypto.Signer, which is an interface to support keys
+ // where the private part is kept in, for example, a hardware module. Common
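This refactor does not change how RSA-PSS is used; the only behavioural difference called out in the message is that opts.Hash now consistently overrides the hash argument, including for PSSSaltLengthEqualsHash. A short usage sketch against the public crypto/rsa API, for orientation:

package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("example message"))

	// opts.Hash overrides the hash argument; after this change the salt
	// length for PSSSaltLengthEqualsHash is derived from opts.Hash too.
	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: crypto.SHA256}
	sig, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, digest[:], opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("signature verifies:",
		rsa.VerifyPSS(&key.PublicKey, crypto.SHA256, digest[:], sig, opts) == nil)
}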
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre2.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre2.patch
new file mode 100644
index 0000000000..1327b44545
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre2.patch
@@ -0,0 +1,401 @@
+From c9d5f60eaa4450ccf1ce878d55b4c6a12843f2f3 Mon Sep 17 00:00:00 2001
+From: Filippo Valsorda <filippo@golang.org>
+Date: Mon, 27 Apr 2020 21:52:38 -0400
+Subject: [PATCH] math/big: add (*Int).FillBytes
+
+Replaced almost every use of Bytes with FillBytes.
+
+Note that the approved proposal was for
+
+ func (*Int) FillBytes(buf []byte)
+
+while this implements
+
+ func (*Int) FillBytes(buf []byte) []byte
+
+because the latter was far nicer to use in all callsites.
+
+Fixes #35833
+
+Change-Id: Ia912df123e5d79b763845312ea3d9a8051343c0a
+Reviewed-on: https://go-review.googlesource.com/c/go/+/230397
+Reviewed-by: Robert Griesemer <gri@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/c9d5f60eaa4450ccf1ce878d55b4c6a12843f2f3]
+CVE: CVE-2023-45287 #Dependency Patch2
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/elliptic/elliptic.go | 13 ++++----
+ src/crypto/rsa/pkcs1v15.go | 20 +++---------
+ src/crypto/rsa/pss.go | 17 +++++------
+ src/crypto/rsa/rsa.go | 32 +++----------------
+ src/crypto/tls/key_schedule.go | 7 ++---
+ src/crypto/x509/sec1.go | 7 ++---
+ src/math/big/int.go | 15 +++++++++
+ src/math/big/int_test.go | 54 +++++++++++++++++++++++++++++++++
+ src/math/big/nat.go | 15 ++++++---
+ 9 files changed, 106 insertions(+), 74 deletions(-)
+
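FillBytes writes the zero-extended big-endian encoding of an integer into a caller-supplied buffer, which is what the crypto call sites in this patch switch to in place of Bytes() followed by manual left-padding. A minimal before/after sketch:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := new(big.Int).SetUint64(0xffffffff)

	// Old pattern, replaced throughout this patch: Bytes() plus a manual
	// left-pad into a fixed-size buffer.
	padded := make([]byte, 8)
	b := x.Bytes()
	copy(padded[len(padded)-len(b):], b)
	fmt.Printf("% x\n", padded) // 00 00 00 00 ff ff ff ff

	// New pattern: FillBytes zero-extends directly into the buffer and
	// panics if x does not fit.
	fmt.Printf("% x\n", x.FillBytes(make([]byte, 8))) // 00 00 00 00 ff ff ff ff
}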
+diff --git a/src/crypto/elliptic/elliptic.go b/src/crypto/elliptic/elliptic.go
+index e2f71cdb63bab..bd5168c5fd842 100644
+--- a/src/crypto/elliptic/elliptic.go
++++ b/src/crypto/elliptic/elliptic.go
+@@ -277,7 +277,7 @@ var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
+ func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err error) {
+ N := curve.Params().N
+ bitSize := N.BitLen()
+- byteLen := (bitSize + 7) >> 3
++ byteLen := (bitSize + 7) / 8
+ priv = make([]byte, byteLen)
+
+ for x == nil {
+@@ -304,15 +304,14 @@ func GenerateKey(curve Curve, rand io.Reader) (priv []byte, x, y *big.Int, err e
+
+ // Marshal converts a point into the uncompressed form specified in section 4.3.6 of ANSI X9.62.
+ func Marshal(curve Curve, x, y *big.Int) []byte {
+- byteLen := (curve.Params().BitSize + 7) >> 3
++ byteLen := (curve.Params().BitSize + 7) / 8
+
+ ret := make([]byte, 1+2*byteLen)
+ ret[0] = 4 // uncompressed point
+
+- xBytes := x.Bytes()
+- copy(ret[1+byteLen-len(xBytes):], xBytes)
+- yBytes := y.Bytes()
+- copy(ret[1+2*byteLen-len(yBytes):], yBytes)
++ x.FillBytes(ret[1 : 1+byteLen])
++ y.FillBytes(ret[1+byteLen : 1+2*byteLen])
++
+ return ret
+ }
+
+@@ -320,7 +319,7 @@ func Marshal(curve Curve, x, y *big.Int) []byte {
+ // It is an error if the point is not in uncompressed form or is not on the curve.
+ // On error, x = nil.
+ func Unmarshal(curve Curve, data []byte) (x, y *big.Int) {
+- byteLen := (curve.Params().BitSize + 7) >> 3
++ byteLen := (curve.Params().BitSize + 7) / 8
+ if len(data) != 1+2*byteLen {
+ return
+ }
+diff --git a/src/crypto/rsa/pkcs1v15.go b/src/crypto/rsa/pkcs1v15.go
+index 499242ffc5b57..3208119ae1ff4 100644
+--- a/src/crypto/rsa/pkcs1v15.go
++++ b/src/crypto/rsa/pkcs1v15.go
+@@ -61,8 +61,7 @@ func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) ([]byte, error)
+ m := new(big.Int).SetBytes(em)
+ c := encrypt(new(big.Int), pub, m)
+
+- copyWithLeftPad(em, c.Bytes())
+- return em, nil
++ return c.FillBytes(em), nil
+ }
+
+ // DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5.
+@@ -150,7 +149,7 @@ func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid
+ return
+ }
+
+- em = leftPad(m.Bytes(), k)
++ em = m.FillBytes(make([]byte, k))
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+ secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
+
+@@ -256,8 +255,7 @@ func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []b
+ return nil, err
+ }
+
+- copyWithLeftPad(em, c.Bytes())
+- return em, nil
++ return c.FillBytes(em), nil
+ }
+
+ // VerifyPKCS1v15 verifies an RSA PKCS#1 v1.5 signature.
+@@ -286,7 +284,7 @@ func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte)
+
+ c := new(big.Int).SetBytes(sig)
+ m := encrypt(new(big.Int), pub, c)
+- em := leftPad(m.Bytes(), k)
++ em := m.FillBytes(make([]byte, k))
+ // EM = 0x00 || 0x01 || PS || 0x00 || T
+
+ ok := subtle.ConstantTimeByteEq(em[0], 0)
+@@ -323,13 +321,3 @@ func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte,
+ }
+ return
+ }
+-
+-// copyWithLeftPad copies src to the end of dest, padding with zero bytes as
+-// needed.
+-func copyWithLeftPad(dest, src []byte) {
+- numPaddingBytes := len(dest) - len(src)
+- for i := 0; i < numPaddingBytes; i++ {
+- dest[i] = 0
+- }
+- copy(dest[numPaddingBytes:], src)
+-}
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index f9844d87329a8..b2adbedb28fa8 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -207,20 +207,19 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+ // Note that hashed must be the result of hashing the input message using the
+ // given hash function. salt is a random sequence of bytes whose length will be
+ // later used to verify the signature.
+-func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) {
++func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
+ emBits := priv.N.BitLen() - 1
+ em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
+ if err != nil {
+- return
++ return nil, err
+ }
+ m := new(big.Int).SetBytes(em)
+ c, err := decryptAndCheck(rand, priv, m)
+ if err != nil {
+- return
++ return nil, err
+ }
+- s = make([]byte, priv.Size())
+- copyWithLeftPad(s, c.Bytes())
+- return
++ s := make([]byte, priv.Size())
++ return c.FillBytes(s), nil
+ }
+
+ const (
+@@ -296,11 +295,9 @@ func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts
+ m := encrypt(new(big.Int), pub, s)
+ emBits := pub.N.BitLen() - 1
+ emLen := (emBits + 7) / 8
+- emBytes := m.Bytes()
+- if emLen < len(emBytes) {
++ if m.BitLen() > emLen*8 {
+ return ErrVerification
+ }
+- em := make([]byte, emLen)
+- copyWithLeftPad(em, emBytes)
++ em := m.FillBytes(make([]byte, emLen))
+ return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
+ }
+diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
+index b4bfa13defbdf..28eb5926c1a54 100644
+--- a/src/crypto/rsa/rsa.go
++++ b/src/crypto/rsa/rsa.go
+@@ -416,16 +416,9 @@ func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, l
+ m := new(big.Int)
+ m.SetBytes(em)
+ c := encrypt(new(big.Int), pub, m)
+- out := c.Bytes()
+
+- if len(out) < k {
+- // If the output is too small, we need to left-pad with zeros.
+- t := make([]byte, k)
+- copy(t[k-len(out):], out)
+- out = t
+- }
+-
+- return out, nil
++ out := make([]byte, k)
++ return c.FillBytes(out), nil
+ }
+
+ // ErrDecryption represents a failure to decrypt a message.
+@@ -597,12 +590,9 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+ lHash := hash.Sum(nil)
+ hash.Reset()
+
+- // Converting the plaintext number to bytes will strip any
+- // leading zeros so we may have to left pad. We do this unconditionally
+- // to avoid leaking timing information. (Although we still probably
+- // leak the number of leading zeros. It's not clear that we can do
+- // anything about this.)
+- em := leftPad(m.Bytes(), k)
++ // We probably leak the number of leading zeros.
++ // It's not clear that we can do anything about this.
++ em := m.FillBytes(make([]byte, k))
+
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+
+@@ -643,15 +633,3 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+
+ return rest[index+1:], nil
+ }
+-
+-// leftPad returns a new slice of length size. The contents of input are right
+-// aligned in the new slice.
+-func leftPad(input []byte, size int) (out []byte) {
+- n := len(input)
+- if n > size {
+- n = size
+- }
+- out = make([]byte, size)
+- copy(out[len(out)-n:], input)
+- return
+-}
+diff --git a/src/crypto/tls/key_schedule.go b/src/crypto/tls/key_schedule.go
+index 2aab323202f7d..314016979afb8 100644
+--- a/src/crypto/tls/key_schedule.go
++++ b/src/crypto/tls/key_schedule.go
+@@ -173,11 +173,8 @@ func (p *nistParameters) SharedKey(peerPublicKey []byte) []byte {
+ }
+
+ xShared, _ := curve.ScalarMult(x, y, p.privateKey)
+- sharedKey := make([]byte, (curve.Params().BitSize+7)>>3)
+- xBytes := xShared.Bytes()
+- copy(sharedKey[len(sharedKey)-len(xBytes):], xBytes)
+-
+- return sharedKey
++ sharedKey := make([]byte, (curve.Params().BitSize+7)/8)
++ return xShared.FillBytes(sharedKey)
+ }
+
+ type x25519Parameters struct {
+diff --git a/src/crypto/x509/sec1.go b/src/crypto/x509/sec1.go
+index 0bfb90cd5464a..52c108ff1d624 100644
+--- a/src/crypto/x509/sec1.go
++++ b/src/crypto/x509/sec1.go
+@@ -52,13 +52,10 @@ func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
+ // marshalECPrivateKey marshals an EC private key into ASN.1, DER format and
+ // sets the curve ID to the given OID, or omits it if OID is nil.
+ func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) {
+- privateKeyBytes := key.D.Bytes()
+- paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
+- copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes)
+-
++ privateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
+ return asn1.Marshal(ecPrivateKey{
+ Version: 1,
+- PrivateKey: paddedPrivateKey,
++ PrivateKey: key.D.FillBytes(privateKey),
+ NamedCurveOID: oid,
+ PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
+ })
+diff --git a/src/math/big/int.go b/src/math/big/int.go
+index 8816cf5266cc4..65f32487b58c0 100644
+--- a/src/math/big/int.go
++++ b/src/math/big/int.go
+@@ -447,11 +447,26 @@ func (z *Int) SetBytes(buf []byte) *Int {
+ }
+
+ // Bytes returns the absolute value of x as a big-endian byte slice.
++//
++// To use a fixed length slice, or a preallocated one, use FillBytes.
+ func (x *Int) Bytes() []byte {
+ buf := make([]byte, len(x.abs)*_S)
+ return buf[x.abs.bytes(buf):]
+ }
+
++// FillBytes sets buf to the absolute value of x, storing it as a zero-extended
++// big-endian byte slice, and returns buf.
++//
++// If the absolute value of x doesn't fit in buf, FillBytes will panic.
++func (x *Int) FillBytes(buf []byte) []byte {
++ // Clear whole buffer. (This gets optimized into a memclr.)
++ for i := range buf {
++ buf[i] = 0
++ }
++ x.abs.bytes(buf)
++ return buf
++}
++
+ // BitLen returns the length of the absolute value of x in bits.
+ // The bit length of 0 is 0.
+ func (x *Int) BitLen() int {
+diff --git a/src/math/big/int_test.go b/src/math/big/int_test.go
+index e3a1587b3f0ad..3c8557323a032 100644
+--- a/src/math/big/int_test.go
++++ b/src/math/big/int_test.go
+@@ -1840,3 +1840,57 @@ func BenchmarkDiv(b *testing.B) {
+ })
+ }
+ }
++
++func TestFillBytes(t *testing.T) {
++ checkResult := func(t *testing.T, buf []byte, want *Int) {
++ t.Helper()
++ got := new(Int).SetBytes(buf)
++ if got.CmpAbs(want) != 0 {
++ t.Errorf("got 0x%x, want 0x%x: %x", got, want, buf)
++ }
++ }
++ panics := func(f func()) (panic bool) {
++ defer func() { panic = recover() != nil }()
++ f()
++ return
++ }
++
++ for _, n := range []string{
++ "0",
++ "1000",
++ "0xffffffff",
++ "-0xffffffff",
++ "0xffffffffffffffff",
++ "0x10000000000000000",
++ "0xabababababababababababababababababababababababababa",
++ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
++ } {
++ t.Run(n, func(t *testing.T) {
++ t.Logf(n)
++ x, ok := new(Int).SetString(n, 0)
++ if !ok {
++ panic("invalid test entry")
++ }
++
++ // Perfectly sized buffer.
++ byteLen := (x.BitLen() + 7) / 8
++ buf := make([]byte, byteLen)
++ checkResult(t, x.FillBytes(buf), x)
++
++ // Way larger, checking all bytes get zeroed.
++ buf = make([]byte, 100)
++ for i := range buf {
++ buf[i] = 0xff
++ }
++ checkResult(t, x.FillBytes(buf), x)
++
++ // Too small.
++ if byteLen > 0 {
++ buf = make([]byte, byteLen-1)
++ if !panics(func() { x.FillBytes(buf) }) {
++ t.Errorf("expected panic for small buffer and value %x", x)
++ }
++ }
++ })
++ }
++}
+diff --git a/src/math/big/nat.go b/src/math/big/nat.go
+index c31ec5156b81d..6a3989bf9d82b 100644
+--- a/src/math/big/nat.go
++++ b/src/math/big/nat.go
+@@ -1476,19 +1476,26 @@ func (z nat) expNNMontgomery(x, y, m nat) nat {
+ }
+
+ // bytes writes the value of z into buf using big-endian encoding.
+-// len(buf) must be >= len(z)*_S. The value of z is encoded in the
+-// slice buf[i:]. The number i of unused bytes at the beginning of
+-// buf is returned as result.
++// The value of z is encoded in the slice buf[i:]. If the value of z
++// cannot be represented in buf, bytes panics. The number i of unused
++// bytes at the beginning of buf is returned as result.
+ func (z nat) bytes(buf []byte) (i int) {
+ i = len(buf)
+ for _, d := range z {
+ for j := 0; j < _S; j++ {
+ i--
+- buf[i] = byte(d)
++ if i >= 0 {
++ buf[i] = byte(d)
++ } else if byte(d) != 0 {
++ panic("math/big: buffer too small to fit value")
++ }
+ d >>= 8
+ }
+ }
+
++ if i < 0 {
++ i = 0
++ }
+ for i < len(buf) && buf[i] == 0 {
+ i++
+ }
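The pre2 backport above is the enabler for the rest of the series: it adds big.Int.FillBytes and replaces the Bytes-plus-left-pad pattern across the crypto packages. A minimal standalone sketch of the difference, assuming a toolchain whose math/big already provides FillBytes (Go 1.15+, or go-1.14 with this backport applied):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewInt(0x0102)

	// Bytes strips leading zeros, so the output length depends on the value.
	fmt.Printf("% x\n", x.Bytes()) // 01 02

	// FillBytes zero-extends into a caller-sized buffer, so RSA code can
	// always produce exactly modulus-size bytes in a single call.
	fmt.Printf("% x\n", x.FillBytes(make([]byte, 8))) // 00 00 00 00 00 00 01 02
}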
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre3.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre3.patch
new file mode 100644
index 0000000000..ae9fcc170c
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287-pre3.patch
@@ -0,0 +1,86 @@
+From 8f676144ad7b7c91adb0c6e1ec89aaa6283c6807 Mon Sep 17 00:00:00 2001
+From: Himanshu Kishna Srivastava <28himanshu@gmail.com>
+Date: Tue, 16 Mar 2021 22:37:46 +0530
+Subject: [PATCH] crypto/rsa: fix salt length calculation with
+ PSSSaltLengthAuto
+
+When the salt length is set to PSSSaltLengthAuto, the maximum salt length must equal:
+
+ (modulus_key_size - 1 + 7)/8 - hash_length - 2
+For example, with a 4096-bit modulus and a SHA-1 hash,
+it should be:
+
+ (4096 -1 + 7)/8 - 20 - 2 = 490
+Previously we'd encounter this error:
+
+ crypto/rsa: key size too small for PSS signature
+
+Fixes #42741
+
+Change-Id: I18bb82c41c511d564b3f4c443f4b3a38ab010ac5
+Reviewed-on: https://go-review.googlesource.com/c/go/+/302230
+Reviewed-by: Emmanuel Odeke <emmanuel@orijtech.com>
+Reviewed-by: Filippo Valsorda <filippo@golang.org>
+Trust: Emmanuel Odeke <emmanuel@orijtech.com>
+Run-TryBot: Emmanuel Odeke <emmanuel@orijtech.com>
+TryBot-Result: Go Bot <gobot@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/8f676144ad7b7c91adb0c6e1ec89aaa6283c6807]
+CVE: CVE-2023-45287 #Dependency Patch3
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/rsa/pss.go | 2 +-
+ src/crypto/rsa/pss_test.go | 20 +++++++++++++++++++-
+ 2 files changed, 20 insertions(+), 2 deletions(-)
+
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index b2adbedb28fa8..814522de8181f 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -269,7 +269,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
+ saltLength := opts.saltLength()
+ switch saltLength {
+ case PSSSaltLengthAuto:
+- saltLength = priv.Size() - 2 - hash.Size()
++ saltLength = (priv.N.BitLen()-1+7)/8 - 2 - hash.Size()
+ case PSSSaltLengthEqualsHash:
+ saltLength = hash.Size()
+ }
+diff --git a/src/crypto/rsa/pss_test.go b/src/crypto/rsa/pss_test.go
+index dfa8d8bb5ad02..c3a6d468497cd 100644
+--- a/src/crypto/rsa/pss_test.go
++++ b/src/crypto/rsa/pss_test.go
+@@ -12,7 +12,7 @@ import (
+ _ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+- _ "crypto/sha256"
++ "crypto/sha256"
+ "encoding/hex"
+ "math/big"
+ "os"
+@@ -233,6 +233,24 @@ func TestPSSSigning(t *testing.T) {
+ }
+ }
+
++func TestSignWithPSSSaltLengthAuto(t *testing.T) {
++ key, err := GenerateKey(rand.Reader, 513)
++ if err != nil {
++ t.Fatal(err)
++ }
++ digest := sha256.Sum256([]byte("message"))
++ signature, err := key.Sign(rand.Reader, digest[:], &PSSOptions{
++ SaltLength: PSSSaltLengthAuto,
++ Hash: crypto.SHA256,
++ })
++ if err != nil {
++ t.Fatal(err)
++ }
++ if len(signature) == 0 {
++ t.Fatal("empty signature returned")
++ }
++}
++
+ func bigFromHex(hex string) *big.Int {
+ n, ok := new(big.Int).SetString(hex, 16)
+ if !ok {
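As a quick arithmetic check of the one-line fix above (using only the figures quoted in the commit message and the 513-bit key from the new test), the old and patched PSSSaltLengthAuto formulas agree for common key sizes and differ exactly when the modulus bit length is congruent to 1 mod 8:

package main

import "fmt"

// maxSaltLengths compares the old and patched PSSSaltLengthAuto formulas.
func maxSaltLengths(modBits, hashLen int) (old, fixed int) {
	old = (modBits+7)/8 - 2 - hashLen     // previous: priv.Size() - 2 - hash.Size()
	fixed = (modBits-1+7)/8 - 2 - hashLen // patched:  (priv.N.BitLen()-1+7)/8 - 2 - hash.Size()
	return
}

func main() {
	fmt.Println(maxSaltLengths(4096, 20)) // 490 490: identical for a 4096-bit key with SHA-1
	fmt.Println(maxSaltLengths(513, 32))  // 31 30:  differ for the 513-bit SHA-256 key in the new test
}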
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45287.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287.patch
new file mode 100644
index 0000000000..90a74255db
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45287.patch
@@ -0,0 +1,1697 @@
+From 8a81fdf165facdcefa06531de5af98a4db343035 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= <cronokirby@gmail.com>
+Date: Tue, 8 Jun 2021 21:36:06 +0200
+Subject: [PATCH] crypto/rsa: replace big.Int for encryption and decryption
+
+Infamously, big.Int does not provide constant-time arithmetic, making
+its use in cryptographic code quite tricky. RSA uses big.Int
+pervasively, in its public API, for key generation, precomputation, and
+for encryption and decryption. This is a known problem. One mitigation,
+blinding, is already in place during decryption. This helps mitigate the
+very leaky exponentiation operation. Because big.Int is fundamentally
+not constant-time, it's unfortunately difficult to guarantee that
+mitigations like these are completely effective.
+
+This patch removes the use of big.Int for encryption and decryption,
+replacing it with an internal nat type instead. Signing and verification
+are also affected, because they depend on encryption and decryption.
+
+Overall, this patch degrades performance by 55% for private key
+operations, and by roughly 40-50x for the (much faster) public key operations.
+(Signatures do both, so the slowdown is worse than decryption.)
+
+name old time/op new time/op delta
+DecryptPKCS1v15/2048-8 1.50ms ± 0% 2.34ms ± 0% +56.44% (p=0.000 n=8+10)
+DecryptPKCS1v15/3072-8 4.40ms ± 0% 6.79ms ± 0% +54.33% (p=0.000 n=10+9)
+DecryptPKCS1v15/4096-8 9.31ms ± 0% 15.14ms ± 0% +62.60% (p=0.000 n=10+10)
+EncryptPKCS1v15/2048-8 8.16µs ± 0% 355.58µs ± 0% +4258.90% (p=0.000 n=10+9)
+DecryptOAEP/2048-8 1.50ms ± 0% 2.34ms ± 0% +55.68% (p=0.000 n=10+9)
+EncryptOAEP/2048-8 8.51µs ± 0% 355.95µs ± 0% +4082.75% (p=0.000 n=10+9)
+SignPKCS1v15/2048-8 1.51ms ± 0% 2.69ms ± 0% +77.94% (p=0.000 n=10+10)
+VerifyPKCS1v15/2048-8 7.25µs ± 0% 354.34µs ± 0% +4789.52% (p=0.000 n=9+9)
+SignPSS/2048-8 1.51ms ± 0% 2.70ms ± 0% +78.80% (p=0.000 n=9+10)
+VerifyPSS/2048-8 8.27µs ± 1% 355.65µs ± 0% +4199.39% (p=0.000 n=10+10)
+
+Keep in mind that this is without any assembly at all, and that further
+improvements are likely possible. I think having a review of the logic
+and the cryptography would be a good idea at this stage, before we
+complicate the code too much through optimization.
+
+The bulk of the work is in nat.go. This introduces two new types: nat,
+representing natural numbers, and modulus, representing moduli used in
+modular arithmetic.
+
+A nat has an "announced size", which may be larger than its "true size",
+the number of bits needed to represent this number. Operations on a nat
+will only ever leak its announced size, never its true size, or other
+information about its value. The size of a nat is always clear based on
+how its value is set. For example, x.mod(y, m) will make the announced
+size of x match that of m, since x is reduced modulo m.
+
+Operations assume that the announced size of the operands match what's
+expected (with a few exceptions). For example, x.modAdd(y, m) assumes
+that x and y have the same announced size as m, and that they're reduced
+modulo m.
+
+Nats are represented over unsaturated bits.UintSize - 1 bit limbs. This
+means that we can't reuse the assembly routines for big.Int, which use
+saturated bits.UintSize limbs. The advantage of unsaturated limbs is
+that it makes Montgomery multiplication faster, by needing fewer
+registers in a hot loop. This makes exponentiation faster, which
+consists of many Montgomery multiplications.
+
+Moduli use nat internally. Unlike nat, the true size of a modulus always
+matches its announced size. When creating a modulus, any zero padding is
+removed. Moduli will also precompute constants when created, which is
+another reason why having a separate type is desirable.
+
+Updates #20654
+
+Co-authored-by: Filippo Valsorda <filippo@golang.org>
+Change-Id: I73b61f87d58ab912e80a9644e255d552cbadcced
+Reviewed-on: https://go-review.googlesource.com/c/go/+/326012
+Run-TryBot: Filippo Valsorda <filippo@golang.org>
+TryBot-Result: Gopher Robot <gobot@golang.org>
+Reviewed-by: Roland Shoemaker <roland@golang.org>
+Reviewed-by: Joedian Reid <joedian@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/8a81fdf165facdcefa06531de5af98a4db343035]
+CVE: CVE-2023-45287
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/crypto/rsa/example_test.go | 21 +-
+ src/crypto/rsa/nat.go | 626 +++++++++++++++++++++++++++++++++
+ src/crypto/rsa/nat_test.go | 384 ++++++++++++++++++++
+ src/crypto/rsa/pkcs1v15.go | 47 +--
+ src/crypto/rsa/pss.go | 50 ++-
+ src/crypto/rsa/pss_test.go | 10 +-
+ src/crypto/rsa/rsa.go | 174 ++++-----
+ 7 files changed, 1143 insertions(+), 169 deletions(-)
+ create mode 100644 src/crypto/rsa/nat.go
+ create mode 100644 src/crypto/rsa/nat_test.go
+
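Before the code itself, one idea from the commit message is easy to see in isolation: values are stored little-endian in unsaturated limbs of bits.UintSize-1 bits, so every limb keeps a spare top bit free for carries during Montgomery multiplication. A hypothetical standalone sketch of that split (math/big only; the helper name is invented here):

package main

import (
	"fmt"
	"math/big"
)

// toLimbs splits x into little-endian 63-bit limbs, the representation the
// patch's nat type uses on 64-bit platforms (illustrative only).
func toLimbs(x *big.Int) []uint64 {
	const _W = 63 // bits.UintSize - 1 on a 64-bit platform
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), _W), big.NewInt(1))
	var limbs []uint64
	for t := new(big.Int).Set(x); t.Sign() != 0; t.Rsh(t, _W) {
		limbs = append(limbs, new(big.Int).And(t, mask).Uint64())
	}
	return limbs
}

func main() {
	x, _ := new(big.Int).SetString("0xffffffffffffffffffffffffffffffff", 0) // 128 bits
	fmt.Printf("%x\n", toLimbs(x)) // three limbs, each below 2^63: the top bit is never set
}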
+diff --git a/src/crypto/rsa/example_test.go b/src/crypto/rsa/example_test.go
+index 1435b70..1963609 100644
+--- a/src/crypto/rsa/example_test.go
++++ b/src/crypto/rsa/example_test.go
+@@ -12,7 +12,6 @@ import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+- "io"
+ "os"
+ )
+
+@@ -36,21 +35,17 @@ import (
+ // a buffer that contains a random key. Thus, if the RSA result isn't
+ // well-formed, the implementation uses a random key in constant time.
+ func ExampleDecryptPKCS1v15SessionKey() {
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+ // The hybrid scheme should use at least a 16-byte symmetric key. Here
+ // we read the random key that will be used if the RSA decryption isn't
+ // well-formed.
+ key := make([]byte, 32)
+- if _, err := io.ReadFull(rng, key); err != nil {
++ if _, err := rand.Read(key); err != nil {
+ panic("RNG failure")
+ }
+
+ rsaCiphertext, _ := hex.DecodeString("aabbccddeeff")
+
+- if err := DecryptPKCS1v15SessionKey(rng, rsaPrivateKey, rsaCiphertext, key); err != nil {
++ if err := DecryptPKCS1v15SessionKey(nil, rsaPrivateKey, rsaCiphertext, key); err != nil {
+ // Any errors that result will be “public” – meaning that they
+ // can be determined without any secret information. (For
+ // instance, if the length of key is impossible given the RSA
+@@ -86,10 +81,6 @@ func ExampleDecryptPKCS1v15SessionKey() {
+ }
+
+ func ExampleSignPKCS1v15() {
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+ message := []byte("message to be signed")
+
+ // Only small messages can be signed directly; thus the hash of a
+@@ -99,7 +90,7 @@ func ExampleSignPKCS1v15() {
+ // of writing (2016).
+ hashed := sha256.Sum256(message)
+
+- signature, err := SignPKCS1v15(rng, rsaPrivateKey, crypto.SHA256, hashed[:])
++ signature, err := SignPKCS1v15(nil, rsaPrivateKey, crypto.SHA256, hashed[:])
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error from signing: %s\n", err)
+ return
+@@ -151,11 +142,7 @@ func ExampleDecryptOAEP() {
+ ciphertext, _ := hex.DecodeString("4d1ee10e8f286390258c51a5e80802844c3e6358ad6690b7285218a7c7ed7fc3a4c7b950fbd04d4b0239cc060dcc7065ca6f84c1756deb71ca5685cadbb82be025e16449b905c568a19c088a1abfad54bf7ecc67a7df39943ec511091a34c0f2348d04e058fcff4d55644de3cd1d580791d4524b92f3e91695582e6e340a1c50b6c6d78e80b4e42c5b4d45e479b492de42bbd39cc642ebb80226bb5200020d501b24a37bcc2ec7f34e596b4fd6b063de4858dbf5a4e3dd18e262eda0ec2d19dbd8e890d672b63d368768360b20c0b6b8592a438fa275e5fa7f60bef0dd39673fd3989cc54d2cb80c08fcd19dacbc265ee1c6014616b0e04ea0328c2a04e73460")
+ label := []byte("orders")
+
+- // crypto/rand.Reader is a good source of entropy for blinding the RSA
+- // operation.
+- rng := rand.Reader
+-
+- plaintext, err := DecryptOAEP(sha256.New(), rng, test2048Key, ciphertext, label)
++ plaintext, err := DecryptOAEP(sha256.New(), nil, test2048Key, ciphertext, label)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error from decryption: %s\n", err)
+ return
+diff --git a/src/crypto/rsa/nat.go b/src/crypto/rsa/nat.go
+new file mode 100644
+index 0000000..da521c2
+--- /dev/null
++++ b/src/crypto/rsa/nat.go
+@@ -0,0 +1,626 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package rsa
++
++import (
++ "math/big"
++ "math/bits"
++)
++
++const (
++ // _W is the number of bits we use for our limbs.
++ _W = bits.UintSize - 1
++ // _MASK selects _W bits from a full machine word.
++ _MASK = (1 << _W) - 1
++)
++
++// choice represents a constant-time boolean. The value of choice is always
++// either 1 or 0. We use an int instead of bool in order to make decisions in
++// constant time by turning it into a mask.
++type choice uint
++
++func not(c choice) choice { return 1 ^ c }
++
++const yes = choice(1)
++const no = choice(0)
++
++// ctSelect returns x if on == 1, and y if on == 0. The execution time of this
++// function does not depend on its inputs. If on is any value besides 1 or 0,
++// the result is undefined.
++func ctSelect(on choice, x, y uint) uint {
++ // When on == 1, mask is 0b111..., otherwise mask is 0b000...
++ mask := -uint(on)
++ // When mask is all zeros, we just have y, otherwise, y cancels with itself.
++ return y ^ (mask & (y ^ x))
++}
++
++// ctEq returns 1 if x == y, and 0 otherwise. The execution time of this
++// function does not depend on its inputs.
++func ctEq(x, y uint) choice {
++ // If x != y, then either x - y or y - x will generate a carry.
++ _, c1 := bits.Sub(x, y, 0)
++ _, c2 := bits.Sub(y, x, 0)
++ return not(choice(c1 | c2))
++}
++
++// ctGeq returns 1 if x >= y, and 0 otherwise. The execution time of this
++// function does not depend on its inputs.
++func ctGeq(x, y uint) choice {
++ // If x < y, then x - y generates a carry.
++ _, carry := bits.Sub(x, y, 0)
++ return not(choice(carry))
++}
++
++// nat represents an arbitrary natural number
++//
++// Each nat has an announced length, which is the number of limbs it has stored.
++// Operations on this number are allowed to leak this length, but will not leak
++// any information about the values contained in those limbs.
++type nat struct {
++ // limbs is a little-endian representation in base 2^W with
++ // W = bits.UintSize - 1. The top bit is always unset between operations.
++ //
++ // The top bit is left unset to optimize Montgomery multiplication, in the
++ // inner loop of exponentiation. Using fully saturated limbs would leave us
++ // working with 129-bit numbers on 64-bit platforms, wasting a lot of space,
++ // and thus time.
++ limbs []uint
++}
++
++// expand expands x to n limbs, leaving its value unchanged.
++func (x *nat) expand(n int) *nat {
++ for len(x.limbs) > n {
++ if x.limbs[len(x.limbs)-1] != 0 {
++ panic("rsa: internal error: shrinking nat")
++ }
++ x.limbs = x.limbs[:len(x.limbs)-1]
++ }
++ if cap(x.limbs) < n {
++ newLimbs := make([]uint, n)
++ copy(newLimbs, x.limbs)
++ x.limbs = newLimbs
++ return x
++ }
++ extraLimbs := x.limbs[len(x.limbs):n]
++ for i := range extraLimbs {
++ extraLimbs[i] = 0
++ }
++ x.limbs = x.limbs[:n]
++ return x
++}
++
++// reset returns a zero nat of n limbs, reusing x's storage if n <= cap(x.limbs).
++func (x *nat) reset(n int) *nat {
++ if cap(x.limbs) < n {
++ x.limbs = make([]uint, n)
++ return x
++ }
++ for i := range x.limbs {
++ x.limbs[i] = 0
++ }
++ x.limbs = x.limbs[:n]
++ return x
++}
++
++// clone returns a new nat, with the same value and announced length as x.
++func (x *nat) clone() *nat {
++ out := &nat{make([]uint, len(x.limbs))}
++ copy(out.limbs, x.limbs)
++ return out
++}
++
++// natFromBig creates a new natural number from a big.Int.
++//
++// The announced length of the resulting nat is based on the actual bit size of
++// the input, ignoring leading zeroes.
++func natFromBig(x *big.Int) *nat {
++ xLimbs := x.Bits()
++ bitSize := bigBitLen(x)
++ requiredLimbs := (bitSize + _W - 1) / _W
++
++ out := &nat{make([]uint, requiredLimbs)}
++ outI := 0
++ shift := 0
++ for i := range xLimbs {
++ xi := uint(xLimbs[i])
++ out.limbs[outI] |= (xi << shift) & _MASK
++ outI++
++ if outI == requiredLimbs {
++ return out
++ }
++ out.limbs[outI] = xi >> (_W - shift)
++ shift++ // this assumes bits.UintSize - _W = 1
++ if shift == _W {
++ shift = 0
++ outI++
++ }
++ }
++ return out
++}
++
++// fillBytes sets bytes to x as a zero-extended big-endian byte slice.
++//
++// If bytes is not long enough to contain the number or at least len(x.limbs)-1
++// limbs, or has zero length, fillBytes will panic.
++func (x *nat) fillBytes(bytes []byte) []byte {
++ if len(bytes) == 0 {
++ panic("nat: fillBytes invoked with too small buffer")
++ }
++ for i := range bytes {
++ bytes[i] = 0
++ }
++ shift := 0
++ outI := len(bytes) - 1
++ for i, limb := range x.limbs {
++ remainingBits := _W
++ for remainingBits >= 8 {
++ bytes[outI] |= byte(limb) << shift
++ consumed := 8 - shift
++ limb >>= consumed
++ remainingBits -= consumed
++ shift = 0
++ outI--
++ if outI < 0 {
++ if limb != 0 || i < len(x.limbs)-1 {
++ panic("nat: fillBytes invoked with too small buffer")
++ }
++ return bytes
++ }
++ }
++ bytes[outI] = byte(limb)
++ shift = remainingBits
++ }
++ return bytes
++}
++
++// natFromBytes converts a slice of big-endian bytes into a nat.
++//
++// The announced length of the output depends on the length of bytes. Unlike
++// big.Int, creating a nat will not remove leading zeros.
++func natFromBytes(bytes []byte) *nat {
++ bitSize := len(bytes) * 8
++ requiredLimbs := (bitSize + _W - 1) / _W
++
++ out := &nat{make([]uint, requiredLimbs)}
++ outI := 0
++ shift := 0
++ for i := len(bytes) - 1; i >= 0; i-- {
++ bi := bytes[i]
++ out.limbs[outI] |= uint(bi) << shift
++ shift += 8
++ if shift >= _W {
++ shift -= _W
++ out.limbs[outI] &= _MASK
++ outI++
++ if shift > 0 {
++ out.limbs[outI] = uint(bi) >> (8 - shift)
++ }
++ }
++ }
++ return out
++}
++
++// cmpEq returns 1 if x == y, and 0 otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) cmpEq(y *nat) choice {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ equal := yes
++ for i := 0; i < size; i++ {
++ equal &= ctEq(xLimbs[i], yLimbs[i])
++ }
++ return equal
++}
++
++// cmpGeq returns 1 if x >= y, and 0 otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) cmpGeq(y *nat) choice {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ var c uint
++ for i := 0; i < size; i++ {
++ c = (xLimbs[i] - yLimbs[i] - c) >> _W
++ }
++ // If there was a carry, then subtracting y underflowed, so
++ // x is not greater than or equal to y.
++ return not(choice(c))
++}
++
++// assign sets x <- y if on == 1, and does nothing otherwise.
++//
++// Both operands must have the same announced length.
++func (x *nat) assign(on choice, y *nat) *nat {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ xLimbs[i] = ctSelect(on, yLimbs[i], xLimbs[i])
++ }
++ return x
++}
++
++// add computes x += y if on == 1, and does nothing otherwise. It returns the
++// carry of the addition regardless of on.
++//
++// Both operands must have the same announced length.
++func (x *nat) add(on choice, y *nat) (c uint) {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ res := xLimbs[i] + yLimbs[i] + c
++ xLimbs[i] = ctSelect(on, res&_MASK, xLimbs[i])
++ c = res >> _W
++ }
++ return
++}
++
++// sub computes x -= y if on == 1, and does nothing otherwise. It returns the
++// borrow of the subtraction regardless of on.
++//
++// Both operands must have the same announced length.
++func (x *nat) sub(on choice, y *nat) (c uint) {
++ // Eliminate bounds checks in the loop.
++ size := len(x.limbs)
++ xLimbs := x.limbs[:size]
++ yLimbs := y.limbs[:size]
++
++ for i := 0; i < size; i++ {
++ res := xLimbs[i] - yLimbs[i] - c
++ xLimbs[i] = ctSelect(on, res&_MASK, xLimbs[i])
++ c = res >> _W
++ }
++ return
++}
++
++// modulus is used for modular arithmetic, precomputing relevant constants.
++//
++// Moduli are assumed to be odd numbers. Moduli can also leak the exact
++// number of bits needed to store their value, and are stored without padding.
++//
++// Their actual value is still kept secret.
++type modulus struct {
++ // The underlying natural number for this modulus.
++ //
++ // This will be stored without any padding, and shouldn't alias with any
++ // other natural number being used.
++ nat *nat
++ leading int // number of leading zeros in the modulus
++ m0inv uint // -nat.limbs[0]⁻¹ mod _W
++}
++
++// minusInverseModW computes -x⁻¹ mod _W with x odd.
++//
++// This operation is used to precompute a constant involved in Montgomery
++// multiplication.
++func minusInverseModW(x uint) uint {
++ // Every iteration of this loop doubles the least-significant bits of
++ // correct inverse in y. The first three bits are already correct (1⁻¹ = 1,
++ // 3⁻¹ = 3, 5⁻¹ = 5, and 7⁻¹ = 7 mod 8), so doubling five times is enough
++ // for 61 bits (and wastes only one iteration for 31 bits).
++ //
++ // See https://crypto.stackexchange.com/a/47496.
++ y := x
++ for i := 0; i < 5; i++ {
++ y = y * (2 - x*y)
++ }
++ return (1 << _W) - (y & _MASK)
++}
++
++// modulusFromNat creates a new modulus from a nat.
++//
++// The nat should be odd, nonzero, and the number of significant bits in the
++// number should be leakable. The nat shouldn't be reused.
++func modulusFromNat(nat *nat) *modulus {
++ m := &modulus{}
++ m.nat = nat
++ size := len(m.nat.limbs)
++ for m.nat.limbs[size-1] == 0 {
++ size--
++ }
++ m.nat.limbs = m.nat.limbs[:size]
++ m.leading = _W - bitLen(m.nat.limbs[size-1])
++ m.m0inv = minusInverseModW(m.nat.limbs[0])
++ return m
++}
++
++// bitLen is a version of bits.Len that only leaks the bit length of n, but not
++// its value. bits.Len and bits.LeadingZeros use a lookup table for the
++// low-order bits on some architectures.
++func bitLen(n uint) int {
++ var len int
++ // We assume, here and elsewhere, that comparison to zero is constant time
++ // with respect to different non-zero values.
++ for n != 0 {
++ len++
++ n >>= 1
++ }
++ return len
++}
++
++// bigBitLen is a version of big.Int.BitLen that only leaks the bit length of x,
++// but not its value. big.Int.BitLen uses bits.Len.
++func bigBitLen(x *big.Int) int {
++ xLimbs := x.Bits()
++ fullLimbs := len(xLimbs) - 1
++ topLimb := uint(xLimbs[len(xLimbs)-1])
++ return fullLimbs*bits.UintSize + bitLen(topLimb)
++}
++
++// modulusSize returns the size of m in bytes.
++func modulusSize(m *modulus) int {
++ bits := len(m.nat.limbs)*_W - int(m.leading)
++ return (bits + 7) / 8
++}
++
++// shiftIn calculates x = x << _W + y mod m.
++//
++// This assumes that x is already reduced mod m, and that y < 2^_W.
++func (x *nat) shiftIn(y uint, m *modulus) *nat {
++ d := new(nat).resetFor(m)
++
++ // Eliminate bounds checks in the loop.
++ size := len(m.nat.limbs)
++ xLimbs := x.limbs[:size]
++ dLimbs := d.limbs[:size]
++ mLimbs := m.nat.limbs[:size]
++
++ // Each iteration of this loop computes x = 2x + b mod m, where b is a bit
++ // from y. Effectively, it left-shifts x and adds y one bit at a time,
++ // reducing it every time.
++ //
++ // To do the reduction, each iteration computes both 2x + b and 2x + b - m.
++ // The next iteration (and finally the return line) will use either result
++ // based on whether the subtraction underflowed.
++ needSubtraction := no
++ for i := _W - 1; i >= 0; i-- {
++ carry := (y >> i) & 1
++ var borrow uint
++ for i := 0; i < size; i++ {
++ l := ctSelect(needSubtraction, dLimbs[i], xLimbs[i])
++
++ res := l<<1 + carry
++ xLimbs[i] = res & _MASK
++ carry = res >> _W
++
++ res = xLimbs[i] - mLimbs[i] - borrow
++ dLimbs[i] = res & _MASK
++ borrow = res >> _W
++ }
++ // See modAdd for how carry (aka overflow), borrow (aka underflow), and
++ // needSubtraction relate.
++ needSubtraction = ctEq(carry, borrow)
++ }
++ return x.assign(needSubtraction, d)
++}
++
++// mod calculates out = x mod m.
++//
++// This works regardless how large the value of x is.
++//
++// The output will be resized to the size of m and overwritten.
++func (out *nat) mod(x *nat, m *modulus) *nat {
++ out.resetFor(m)
++ // Working our way from the most significant to the least significant limb,
++ // we can insert each limb at the least significant position, shifting all
++ // previous limbs left by _W. This way each limb will get shifted by the
++ // correct number of bits. We can insert at least N - 1 limbs without
++ // overflowing m. After that, we need to reduce every time we shift.
++ i := len(x.limbs) - 1
++ // For the first N - 1 limbs we can skip the actual shifting and position
++ // them at the shifted position, which starts at min(N - 2, i).
++ start := len(m.nat.limbs) - 2
++ if i < start {
++ start = i
++ }
++ for j := start; j >= 0; j-- {
++ out.limbs[j] = x.limbs[i]
++ i--
++ }
++ // We shift in the remaining limbs, reducing modulo m each time.
++ for i >= 0 {
++ out.shiftIn(x.limbs[i], m)
++ i--
++ }
++ return out
++}
++
++// expandFor ensures out has the right size to work with operations modulo m.
++//
++// This assumes that out has as many or fewer limbs than m, or that the extra
++// limbs are all zero (which may happen when decoding a value that has leading
++// zeroes in its bytes representation that spill over the limb threshold).
++func (out *nat) expandFor(m *modulus) *nat {
++ return out.expand(len(m.nat.limbs))
++}
++
++// resetFor ensures out has the right size to work with operations modulo m.
++//
++// out is zeroed and may start at any size.
++func (out *nat) resetFor(m *modulus) *nat {
++ return out.reset(len(m.nat.limbs))
++}
++
++// modSub computes x = x - y mod m.
++//
++// The length of both operands must be the same as the modulus. Both operands
++// must already be reduced modulo m.
++func (x *nat) modSub(y *nat, m *modulus) *nat {
++ underflow := x.sub(yes, y)
++ // If the subtraction underflowed, add m.
++ x.add(choice(underflow), m.nat)
++ return x
++}
++
++// modAdd computes x = x + y mod m.
++//
++// The length of both operands must be the same as the modulus. Both operands
++// must already be reduced modulo m.
++func (x *nat) modAdd(y *nat, m *modulus) *nat {
++ overflow := x.add(yes, y)
++ underflow := not(x.cmpGeq(m.nat)) // x < m
++
++ // Three cases are possible:
++ //
++ // - overflow = 0, underflow = 0
++ //
++ // In this case, addition fits in our limbs, but we can still subtract away
++ // m without an underflow, so we need to perform the subtraction to reduce
++ // our result.
++ //
++ // - overflow = 0, underflow = 1
++ //
++ // The addition fits in our limbs, but we can't subtract m without
++ // underflowing. The result is already reduced.
++ //
++ // - overflow = 1, underflow = 1
++ //
++ // The addition does not fit in our limbs, and the subtraction's borrow
++ // would cancel out with the addition's carry. We need to subtract m to
++ // reduce our result.
++ //
++ // The overflow = 1, underflow = 0 case is not possible, because y is at
++ // most m - 1, and if adding m - 1 overflows, then subtracting m must
++ // necessarily underflow.
++ needSubtraction := ctEq(overflow, uint(underflow))
++
++ x.sub(needSubtraction, m.nat)
++ return x
++}
++
++// montgomeryRepresentation calculates x = x * R mod m, with R = 2^(_W * n) and
++// n = len(m.nat.limbs).
++//
++// Faster Montgomery multiplication replaces standard modular multiplication for
++// numbers in this representation.
++//
++// This assumes that x is already reduced mod m.
++func (x *nat) montgomeryRepresentation(m *modulus) *nat {
++ for i := 0; i < len(m.nat.limbs); i++ {
++ x.shiftIn(0, m) // x = x * 2^_W mod m
++ }
++ return x
++}
++
++// montgomeryMul calculates d = a * b / R mod m, with R = 2^(_W * n) and
++// n = len(m.nat.limbs), using the Montgomery Multiplication technique.
++//
++// All inputs should be the same length, not aliasing d, and already
++// reduced modulo m. d will be resized to the size of m and overwritten.
++func (d *nat) montgomeryMul(a *nat, b *nat, m *modulus) *nat {
++ // See https://bearssl.org/bigint.html#montgomery-reduction-and-multiplication
++ // for a description of the algorithm.
++
++ // Eliminate bounds checks in the loop.
++ size := len(m.nat.limbs)
++ aLimbs := a.limbs[:size]
++ bLimbs := b.limbs[:size]
++ dLimbs := d.resetFor(m).limbs[:size]
++ mLimbs := m.nat.limbs[:size]
++
++ var overflow uint
++ for i := 0; i < size; i++ {
++ f := ((dLimbs[0] + aLimbs[i]*bLimbs[0]) * m.m0inv) & _MASK
++ carry := uint(0)
++ for j := 0; j < size; j++ {
++ // z = d[j] + a[i] * b[j] + f * m[j] + carry <= 2^(2W+1) - 2^(W+1) + 2^W
++ hi, lo := bits.Mul(aLimbs[i], bLimbs[j])
++ z_lo, c := bits.Add(dLimbs[j], lo, 0)
++ z_hi, _ := bits.Add(0, hi, c)
++ hi, lo = bits.Mul(f, mLimbs[j])
++ z_lo, c = bits.Add(z_lo, lo, 0)
++ z_hi, _ = bits.Add(z_hi, hi, c)
++ z_lo, c = bits.Add(z_lo, carry, 0)
++ z_hi, _ = bits.Add(z_hi, 0, c)
++ if j > 0 {
++ dLimbs[j-1] = z_lo & _MASK
++ }
++ carry = z_hi<<1 | z_lo>>_W // carry <= 2^(W+1) - 2
++ }
++ z := overflow + carry // z <= 2^(W+1) - 1
++ dLimbs[size-1] = z & _MASK
++ overflow = z >> _W // overflow <= 1
++ }
++ // See modAdd for how overflow, underflow, and needSubtraction relate.
++ underflow := not(d.cmpGeq(m.nat)) // d < m
++ needSubtraction := ctEq(overflow, uint(underflow))
++ d.sub(needSubtraction, m.nat)
++
++ return d
++}
++
++// modMul calculates x *= y mod m.
++//
++// x and y must already be reduced modulo m, they must share its announced
++// length, and they may not alias.
++func (x *nat) modMul(y *nat, m *modulus) *nat {
++ // A Montgomery multiplication by a value out of the Montgomery domain
++ // takes the result out of Montgomery representation.
++ xR := x.clone().montgomeryRepresentation(m) // xR = x * R mod m
++ return x.montgomeryMul(xR, y, m) // x = xR * y / R mod m
++}
++
++// exp calculates out = x^e mod m.
++//
++// The exponent e is represented in big-endian order. The output will be resized
++// to the size of m and overwritten. x must already be reduced modulo m.
++func (out *nat) exp(x *nat, e []byte, m *modulus) *nat {
++ // We use a 4 bit window. For our RSA workload, 4 bit windows are faster
++ // than 2 bit windows, but use an extra 12 nats worth of scratch space.
++ // Using bit sizes that don't divide 8 are more complex to implement.
++ table := make([]*nat, (1<<4)-1) // table[i] = x ^ (i+1)
++ table[0] = x.clone().montgomeryRepresentation(m)
++ for i := 1; i < len(table); i++ {
++ table[i] = new(nat).expandFor(m)
++ table[i].montgomeryMul(table[i-1], table[0], m)
++ }
++
++ out.resetFor(m)
++ out.limbs[0] = 1
++ out.montgomeryRepresentation(m)
++ t0 := new(nat).expandFor(m)
++ t1 := new(nat).expandFor(m)
++ for _, b := range e {
++ for _, j := range []int{4, 0} {
++ // Square four times.
++ t1.montgomeryMul(out, out, m)
++ out.montgomeryMul(t1, t1, m)
++ t1.montgomeryMul(out, out, m)
++ out.montgomeryMul(t1, t1, m)
++
++ // Select x^k in constant time from the table.
++ k := uint((b >> j) & 0b1111)
++ for i := range table {
++ t0.assign(ctEq(k, uint(i+1)), table[i])
++ }
++
++ // Multiply by x^k, discarding the result if k = 0.
++ t1.montgomeryMul(out, t0, m)
++ out.assign(not(ctEq(k, 0)), t1)
++ }
++ }
++
++ // By Montgomery multiplying with 1 not in Montgomery representation, we
++ // convert out back from Montgomery representation, because it works out to
++ // dividing by R.
++ t0.assign(yes, out)
++ t1.resetFor(m)
++ t1.limbs[0] = 1
++ out.montgomeryMul(t0, t1, m)
++
++ return out
++}
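One compact building block in the new nat.go above is minusInverseModW. A standalone sanity check of its Newton-iteration trick, written here over saturated 64-bit words for simplicity (illustrative helper, not the patch's code):

package main

import (
	"fmt"
	"math"
)

// minusInverseMod64 returns -x^-1 mod 2^64 for odd x, mirroring the patch's
// minusInverseModW but over full 64-bit words.
func minusInverseMod64(x uint64) uint64 {
	y := x // correct to 3 bits, since n^-1 == n (mod 8) for odd n
	for i := 0; i < 5; i++ {
		y = y * (2 - x*y) // each step doubles the number of correct low bits
	}
	return -y
}

func main() {
	for _, x := range []uint64{1, 3, 0xdeadbeef, 0xffffffffffffffff} {
		if x*minusInverseMod64(x) != math.MaxUint64 { // x * (-x^-1) == -1 mod 2^64
			panic("not an inverse")
		}
	}
	fmt.Println("ok")
}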
+diff --git a/src/crypto/rsa/nat_test.go b/src/crypto/rsa/nat_test.go
+new file mode 100644
+index 0000000..3e6eb10
+--- /dev/null
++++ b/src/crypto/rsa/nat_test.go
+@@ -0,0 +1,384 @@
++// Copyright 2021 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package rsa
++
++import (
++ "bytes"
++ "math/big"
++ "math/bits"
++ "math/rand"
++ "reflect"
++ "testing"
++ "testing/quick"
++)
++
++// Generate generates an even nat. It's used by testing/quick to produce random
++// *nat values for quick.Check invocations.
++func (*nat) Generate(r *rand.Rand, size int) reflect.Value {
++ limbs := make([]uint, size)
++ for i := 0; i < size; i++ {
++ limbs[i] = uint(r.Uint64()) & ((1 << _W) - 2)
++ }
++ return reflect.ValueOf(&nat{limbs})
++}
++
++func testModAddCommutative(a *nat, b *nat) bool {
++ mLimbs := make([]uint, len(a.limbs))
++ for i := 0; i < len(mLimbs); i++ {
++ mLimbs[i] = _MASK
++ }
++ m := modulusFromNat(&nat{mLimbs})
++ aPlusB := a.clone()
++ aPlusB.modAdd(b, m)
++ bPlusA := b.clone()
++ bPlusA.modAdd(a, m)
++ return aPlusB.cmpEq(bPlusA) == 1
++}
++
++func TestModAddCommutative(t *testing.T) {
++ err := quick.Check(testModAddCommutative, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func testModSubThenAddIdentity(a *nat, b *nat) bool {
++ mLimbs := make([]uint, len(a.limbs))
++ for i := 0; i < len(mLimbs); i++ {
++ mLimbs[i] = _MASK
++ }
++ m := modulusFromNat(&nat{mLimbs})
++ original := a.clone()
++ a.modSub(b, m)
++ a.modAdd(b, m)
++ return a.cmpEq(original) == 1
++}
++
++func TestModSubThenAddIdentity(t *testing.T) {
++ err := quick.Check(testModSubThenAddIdentity, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func testMontgomeryRoundtrip(a *nat) bool {
++ one := &nat{make([]uint, len(a.limbs))}
++ one.limbs[0] = 1
++ aPlusOne := a.clone()
++ aPlusOne.add(1, one)
++ m := modulusFromNat(aPlusOne)
++ monty := a.clone()
++ monty.montgomeryRepresentation(m)
++ aAgain := monty.clone()
++ aAgain.montgomeryMul(monty, one, m)
++ return a.cmpEq(aAgain) == 1
++}
++
++func TestMontgomeryRoundtrip(t *testing.T) {
++ err := quick.Check(testMontgomeryRoundtrip, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++}
++
++func TestFromBig(t *testing.T) {
++ expected := []byte{0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
++ theBig := new(big.Int).SetBytes(expected)
++ actual := natFromBig(theBig).fillBytes(make([]byte, len(expected)))
++ if !bytes.Equal(actual, expected) {
++ t.Errorf("%+x != %+x", actual, expected)
++ }
++}
++
++func TestFillBytes(t *testing.T) {
++ xBytes := []byte{0xAA, 0xFF, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}
++ x := natFromBytes(xBytes)
++ for l := 20; l >= len(xBytes); l-- {
++ buf := make([]byte, l)
++ rand.Read(buf)
++ actual := x.fillBytes(buf)
++ expected := make([]byte, l)
++ copy(expected[l-len(xBytes):], xBytes)
++ if !bytes.Equal(actual, expected) {
++ t.Errorf("%d: %+v != %+v", l, actual, expected)
++ }
++ }
++ for l := len(xBytes) - 1; l >= 0; l-- {
++ (func() {
++ defer func() {
++ if recover() == nil {
++ t.Errorf("%d: expected panic", l)
++ }
++ }()
++ x.fillBytes(make([]byte, l))
++ })()
++ }
++}
++
++func TestFromBytes(t *testing.T) {
++ f := func(xBytes []byte) bool {
++ if len(xBytes) == 0 {
++ return true
++ }
++ actual := natFromBytes(xBytes).fillBytes(make([]byte, len(xBytes)))
++ if !bytes.Equal(actual, xBytes) {
++ t.Errorf("%+x != %+x", actual, xBytes)
++ return false
++ }
++ return true
++ }
++
++ err := quick.Check(f, &quick.Config{})
++ if err != nil {
++ t.Error(err)
++ }
++
++ f([]byte{0xFF, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88})
++ f(bytes.Repeat([]byte{0xFF}, _W))
++}
++
++func TestShiftIn(t *testing.T) {
++ if bits.UintSize != 64 {
++ t.Skip("examples are only valid in 64 bit")
++ }
++ examples := []struct {
++ m, x, expected []byte
++ y uint64
++ }{{
++ m: []byte{13},
++ x: []byte{0},
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{7},
++ }, {
++ m: []byte{13},
++ x: []byte{7},
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{11},
++ }, {
++ m: []byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d},
++ x: make([]byte, 9),
++ y: 0x7FFF_FFFF_FFFF_FFFF,
++ expected: []byte{0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
++ }, {
++ m: []byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d},
++ x: []byte{0x00, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
++ y: 0,
++ expected: []byte{0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08},
++ }}
++
++ for i, tt := range examples {
++ m := modulusFromNat(natFromBytes(tt.m))
++ got := natFromBytes(tt.x).expandFor(m).shiftIn(uint(tt.y), m)
++ if got.cmpEq(natFromBytes(tt.expected).expandFor(m)) != 1 {
++ t.Errorf("%d: got %x, expected %x", i, got, tt.expected)
++ }
++ }
++}
++
++func TestModulusAndNatSizes(t *testing.T) {
++ // These are 126 bit (2 * _W on 64-bit architectures) values, serialized as
++ // 128 bits worth of bytes. If leading zeroes are stripped, they fit in two
++ // limbs, if they are not, they fit in three. This can be a problem because
++ // modulus strips leading zeroes and nat does not.
++ m := modulusFromNat(natFromBytes([]byte{
++ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}))
++ x := natFromBytes([]byte{
++ 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe})
++ x.expandFor(m) // must not panic for shrinking
++}
++
++func TestExpand(t *testing.T) {
++ sliced := []uint{1, 2, 3, 4}
++ examples := []struct {
++ in []uint
++ n int
++ out []uint
++ }{{
++ []uint{1, 2},
++ 4,
++ []uint{1, 2, 0, 0},
++ }, {
++ sliced[:2],
++ 4,
++ []uint{1, 2, 0, 0},
++ }, {
++ []uint{1, 2},
++ 2,
++ []uint{1, 2},
++ }, {
++ []uint{1, 2, 0},
++ 2,
++ []uint{1, 2},
++ }}
++
++ for i, tt := range examples {
++ got := (&nat{tt.in}).expand(tt.n)
++ if len(got.limbs) != len(tt.out) || got.cmpEq(&nat{tt.out}) != 1 {
++ t.Errorf("%d: got %x, expected %x", i, got, tt.out)
++ }
++ }
++}
++
++func TestMod(t *testing.T) {
++ m := modulusFromNat(natFromBytes([]byte{0x06, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d}))
++ x := natFromBytes([]byte{0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
++ out := new(nat)
++ out.mod(x, m)
++ expected := natFromBytes([]byte{0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09})
++ if out.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", out, expected)
++ }
++}
++
++func TestModSub(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{6}}
++ y := &nat{[]uint{7}}
++ x.modSub(y, m)
++ expected := &nat{[]uint{12}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++ x.modSub(y, m)
++ expected = &nat{[]uint{5}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++}
++
++func TestModAdd(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{6}}
++ y := &nat{[]uint{7}}
++ x.modAdd(y, m)
++ expected := &nat{[]uint{0}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++ x.modAdd(y, m)
++ expected = &nat{[]uint{7}}
++ if x.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", x, expected)
++ }
++}
++
++func TestExp(t *testing.T) {
++ m := modulusFromNat(&nat{[]uint{13}})
++ x := &nat{[]uint{3}}
++ out := &nat{[]uint{0}}
++ out.exp(x, []byte{12}, m)
++ expected := &nat{[]uint{1}}
++ if out.cmpEq(expected) != 1 {
++ t.Errorf("%+v != %+v", out, expected)
++ }
++}
++
++func makeBenchmarkModulus() *modulus {
++ m := make([]uint, 32)
++ for i := 0; i < 32; i++ {
++ m[i] = _MASK
++ }
++ return modulusFromNat(&nat{limbs: m})
++}
++
++func makeBenchmarkValue() *nat {
++ x := make([]uint, 32)
++ for i := 0; i < 32; i++ {
++ x[i] = _MASK - 1
++ }
++ return &nat{limbs: x}
++}
++
++func makeBenchmarkExponent() []byte {
++ e := make([]byte, 256)
++ for i := 0; i < 32; i++ {
++ e[i] = 0xFF
++ }
++ return e
++}
++
++func BenchmarkModAdd(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modAdd(y, m)
++ }
++}
++
++func BenchmarkModSub(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modSub(y, m)
++ }
++}
++
++func BenchmarkMontgomeryRepr(b *testing.B) {
++ x := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.montgomeryRepresentation(m)
++ }
++}
++
++func BenchmarkMontgomeryMul(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ out := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.montgomeryMul(x, y, m)
++ }
++}
++
++func BenchmarkModMul(b *testing.B) {
++ x := makeBenchmarkValue()
++ y := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ x.modMul(y, m)
++ }
++}
++
++func BenchmarkExpBig(b *testing.B) {
++ out := new(big.Int)
++ exponentBytes := makeBenchmarkExponent()
++ x := new(big.Int).SetBytes(exponentBytes)
++ e := new(big.Int).SetBytes(exponentBytes)
++ n := new(big.Int).SetBytes(exponentBytes)
++ one := new(big.Int).SetUint64(1)
++ n.Add(n, one)
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.Exp(x, e, n)
++ }
++}
++
++func BenchmarkExp(b *testing.B) {
++ x := makeBenchmarkValue()
++ e := makeBenchmarkExponent()
++ out := makeBenchmarkValue()
++ m := makeBenchmarkModulus()
++
++ b.ResetTimer()
++ for i := 0; i < b.N; i++ {
++ out.exp(x, e, m)
++ }
++}
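The property tests above follow the standard testing/quick pattern, with the custom Generate method steering how random *nat values are produced. A minimal generic sketch of that pattern (hypothetical example, independent of the patch):

package example

import (
	"testing"
	"testing/quick"
)

// TestAddCommutes checks a property over many random inputs, the same shape
// as TestModAddCommutative above.
func TestAddCommutes(t *testing.T) {
	commutes := func(a, b uint32) bool { return a+b == b+a }
	if err := quick.Check(commutes, nil); err != nil {
		t.Error(err)
	}
}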
+diff --git a/src/crypto/rsa/pkcs1v15.go b/src/crypto/rsa/pkcs1v15.go
+index a216be3..ce89f92 100644
+--- a/src/crypto/rsa/pkcs1v15.go
++++ b/src/crypto/rsa/pkcs1v15.go
+@@ -9,7 +9,6 @@ import (
+ "crypto/subtle"
+ "errors"
+ "io"
+- "math/big"
+
+ "crypto/internal/randutil"
+ )
+@@ -58,14 +57,11 @@ func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) ([]byte, error)
+ em[len(em)-len(msg)-1] = 0
+ copy(mm, msg)
+
+- m := new(big.Int).SetBytes(em)
+- c := encrypt(new(big.Int), pub, m)
+-
+- return c.FillBytes(em), nil
++ return encrypt(pub, em), nil
+ }
+
+ // DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5.
+-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
++// The rand parameter is legacy and ignored, and it can be nil.
+ //
+ // Note that whether this function returns an error or not discloses secret
+ // information. If an attacker can cause this function to run repeatedly and
+@@ -76,7 +72,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
+ if err := checkPub(&priv.PublicKey); err != nil {
+ return nil, err
+ }
+- valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext)
++ valid, out, index, err := decryptPKCS1v15(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+@@ -87,7 +83,7 @@ func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byt
+ }
+
+ // DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS#1 v1.5.
+-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
++// The rand parameter is legacy and ignored, and it can be nil.
+ // It returns an error if the ciphertext is the wrong length or if the
+ // ciphertext is greater than the public modulus. Otherwise, no error is
+ // returned. If the padding is valid, the resulting plaintext message is copied
+@@ -114,7 +110,7 @@ func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []by
+ return ErrDecryption
+ }
+
+- valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext)
++ valid, em, index, err := decryptPKCS1v15(priv, ciphertext)
+ if err != nil {
+ return err
+ }
+@@ -130,26 +126,24 @@ func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []by
+ return nil
+ }
+
+-// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if
+-// rand is not nil. It returns one or zero in valid that indicates whether the
+-// plaintext was correctly structured. In either case, the plaintext is
+-// returned in em so that it may be read independently of whether it was valid
+-// in order to maintain constant memory access patterns. If the plaintext was
+-// valid then index contains the index of the original message in em.
+-func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
++// decryptPKCS1v15 decrypts ciphertext using priv. It returns one or zero in
++// valid that indicates whether the plaintext was correctly structured.
++// In either case, the plaintext is returned in em so that it may be read
++// independently of whether it was valid in order to maintain constant memory
++// access patterns. If the plaintext was valid then index contains the index of
++// the original message in em, to allow constant time padding removal.
++func decryptPKCS1v15(priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
+ k := priv.Size()
+ if k < 11 {
+ err = ErrDecryption
+ return
+ }
+
+- c := new(big.Int).SetBytes(ciphertext)
+- m, err := decrypt(rand, priv, c)
++ em, err = decrypt(priv, ciphertext)
+ if err != nil {
+ return
+ }
+
+- em = m.FillBytes(make([]byte, k))
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+ secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
+
+@@ -221,8 +215,7 @@ var hashPrefixes = map[crypto.Hash][]byte{
+ // function. If hash is zero, hashed is signed directly. This isn't
+ // advisable except for interoperability.
+ //
+-// If rand is not nil then RSA blinding will be used to avoid timing
+-// side-channel attacks.
++// The rand parameter is legacy and ignored, and it can be nil.
+ //
+ // This function is deterministic. Thus, if the set of possible
+ // messages is small, an attacker may be able to build a map from
+@@ -249,13 +242,7 @@ func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []b
+ copy(em[k-tLen:k-hashLen], prefix)
+ copy(em[k-hashLen:k], hashed)
+
+- m := new(big.Int).SetBytes(em)
+- c, err := decryptAndCheck(rand, priv, m)
+- if err != nil {
+- return nil, err
+- }
+-
+- return c.FillBytes(em), nil
++ return decryptAndCheck(priv, em)
+ }
+
+ // VerifyPKCS1v15 verifies an RSA PKCS#1 v1.5 signature.
+@@ -275,9 +262,7 @@ func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte)
+ return ErrVerification
+ }
+
+- c := new(big.Int).SetBytes(sig)
+- m := encrypt(new(big.Int), pub, c)
+- em := m.FillBytes(make([]byte, k))
++ em := encrypt(pub, sig)
+ // EM = 0x00 || 0x01 || PS || 0x00 || T
+
+ ok := subtle.ConstantTimeByteEq(em[0], 0)
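The rewritten PKCS#1 v1.5 paths keep the existing constant-time comparison style: secret-dependent checks are held as 0/1 ints and combined with bitwise operators instead of branches. A small standalone illustration using the same crypto/subtle calls (the sample bytes are made up):

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	em := []byte{0x00, 0x02, 0xaa, 0xbb, 0x00, 'h', 'i'} // 0x00 0x02 <PS> 0x00 <message>

	firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
	secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
	valid := firstByteIsZero & secondByteIsTwo // 1 only if both checks pass

	fmt.Println(valid == 1) // true, decided without branching on the secret bytes
}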
+diff --git a/src/crypto/rsa/pss.go b/src/crypto/rsa/pss.go
+index 814522d..eaba4be 100644
+--- a/src/crypto/rsa/pss.go
++++ b/src/crypto/rsa/pss.go
+@@ -12,7 +12,6 @@ import (
+ "errors"
+ "hash"
+ "io"
+- "math/big"
+ )
+
+ // Per RFC 8017, Section 9.1
+@@ -207,19 +206,27 @@ func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
+ // Note that hashed must be the result of hashing the input message using the
+ // given hash function. salt is a random sequence of bytes whose length will be
+ // later used to verify the signature.
+-func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
+- emBits := priv.N.BitLen() - 1
++func signPSSWithSalt(priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) ([]byte, error) {
++ emBits := bigBitLen(priv.N) - 1
+ em, err := emsaPSSEncode(hashed, emBits, salt, hash.New())
+ if err != nil {
+ return nil, err
+ }
+- m := new(big.Int).SetBytes(em)
+- c, err := decryptAndCheck(rand, priv, m)
+- if err != nil {
+- return nil, err
++
++ // RFC 8017: "Note that the octet length of EM will be one less than k if
++ // modBits - 1 is divisible by 8 and equal to k otherwise, where k is the
++ // length in octets of the RSA modulus n."
++ //
++ // This is extremely annoying, as all other encrypt and decrypt inputs are
++ // always the exact same size as the modulus. Since it only happens for
++ // weird modulus sizes, fix it by padding inefficiently.
++ if emLen, k := len(em), priv.Size(); emLen < k {
++ emNew := make([]byte, k)
++ copy(emNew[k-emLen:], em)
++ em = emNew
+ }
+- s := make([]byte, priv.Size())
+- return c.FillBytes(s), nil
++
++ return decryptAndCheck(priv, em)
+ }
+
+ const (
+@@ -269,7 +276,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
+ saltLength := opts.saltLength()
+ switch saltLength {
+ case PSSSaltLengthAuto:
+- saltLength = (priv.N.BitLen()-1+7)/8 - 2 - hash.Size()
++ saltLength = (bigBitLen(priv.N)-1+7)/8 - 2 - hash.Size()
+ case PSSSaltLengthEqualsHash:
+ saltLength = hash.Size()
+ }
+@@ -278,7 +285,7 @@ func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte,
+ if _, err := io.ReadFull(rand, salt); err != nil {
+ return nil, err
+ }
+- return signPSSWithSalt(rand, priv, hash, digest, salt)
++ return signPSSWithSalt(priv, hash, digest, salt)
+ }
+
+ // VerifyPSS verifies a PSS signature.
+@@ -291,13 +298,22 @@ func VerifyPSS(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts
+ if len(sig) != pub.Size() {
+ return ErrVerification
+ }
+- s := new(big.Int).SetBytes(sig)
+- m := encrypt(new(big.Int), pub, s)
+- emBits := pub.N.BitLen() - 1
++
++ emBits := bigBitLen(pub.N) - 1
+ emLen := (emBits + 7) / 8
+- if m.BitLen() > emLen*8 {
+- return ErrVerification
++ em := encrypt(pub, sig)
++
++ // Like in signPSSWithSalt, deal with mismatches between emLen and the size
++ // of the modulus. The spec would have us wire emLen into the encoding
++ // function, but we'd rather always encode to the size of the modulus and
++ // then strip leading zeroes if necessary. This only happens for weird
++ // modulus sizes anyway.
++ for len(em) > emLen && len(em) > 0 {
++ if em[0] != 0 {
++ return ErrVerification
++ }
++ em = em[1:]
+ }
+- em := m.FillBytes(make([]byte, emLen))
++
+ return emsaPSSVerify(digest, em, emBits, opts.saltLength(), hash.New())
+ }
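The emLen special-casing above only triggers for unusual key sizes. A quick arithmetic check for the 513-bit case exercised by TestPSS513 below (plain numbers, nothing from the patch):

package main

import "fmt"

func main() {
	modBits := 513
	k := (modBits + 7) / 8    // modulus size in bytes: 65
	emBits := modBits - 1     // 512
	emLen := (emBits + 7) / 8 // 64: one byte shorter than k, exactly the RFC 8017 corner case
	fmt.Println(k, emLen)     // 65 64
}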
+diff --git a/src/crypto/rsa/pss_test.go b/src/crypto/rsa/pss_test.go
+index c3a6d46..d018b43 100644
+--- a/src/crypto/rsa/pss_test.go
++++ b/src/crypto/rsa/pss_test.go
+@@ -233,7 +233,10 @@ func TestPSSSigning(t *testing.T) {
+ }
+ }
+
+-func TestSignWithPSSSaltLengthAuto(t *testing.T) {
++func TestPSS513(t *testing.T) {
++ // See Issue 42741, and separately, RFC 8017: "Note that the octet length of
++ // EM will be one less than k if modBits - 1 is divisible by 8 and equal to
++ // k otherwise, where k is the length in octets of the RSA modulus n."
+ key, err := GenerateKey(rand.Reader, 513)
+ if err != nil {
+ t.Fatal(err)
+@@ -246,8 +249,9 @@ func TestSignWithPSSSaltLengthAuto(t *testing.T) {
+ if err != nil {
+ t.Fatal(err)
+ }
+- if len(signature) == 0 {
+- t.Fatal("empty signature returned")
++ err = VerifyPSS(&key.PublicKey, crypto.SHA256, digest[:], signature, nil)
++ if err != nil {
++ t.Error(err)
+ }
+ }
+
+diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
+index 5a00ed2..29d9d31 100644
+--- a/src/crypto/rsa/rsa.go
++++ b/src/crypto/rsa/rsa.go
+@@ -19,13 +19,17 @@
+ // over the public key primitive, the PrivateKey type implements the
+ // Decrypter and Signer interfaces from the crypto package.
+ //
+-// The RSA operations in this package are not implemented using constant-time algorithms.
++// Operations in this package are implemented using constant-time algorithms,
++// except for [GenerateKey], [PrivateKey.Precompute], and [PrivateKey.Validate].
++// Every other operation only leaks the bit size of the involved values, which
++// all depend on the selected key size.
+ package rsa
+
+ import (
+ "crypto"
+ "crypto/rand"
+ "crypto/subtle"
++ "encoding/binary"
+ "errors"
+ "hash"
+ "io"
+@@ -35,7 +39,6 @@ import (
+ "crypto/internal/randutil"
+ )
+
+-var bigZero = big.NewInt(0)
+ var bigOne = big.NewInt(1)
+
+ // A PublicKey represents the public part of an RSA key.
+@@ -47,7 +50,7 @@ type PublicKey struct {
+ // Size returns the modulus size in bytes. Raw signatures and ciphertexts
+ // for or by this public key will have the same size.
+ func (pub *PublicKey) Size() int {
+- return (pub.N.BitLen() + 7) / 8
++ return (bigBitLen(pub.N) + 7) / 8
+ }
+
+ // OAEPOptions is an interface for passing options to OAEP decryption using the
+@@ -351,10 +354,19 @@ func mgf1XOR(out []byte, hash hash.Hash, seed []byte) {
+ // too large for the size of the public key.
+ var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size")
+
+-func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int {
+- e := big.NewInt(int64(pub.E))
+- c.Exp(m, e, pub.N)
+- return c
++func encrypt(pub *PublicKey, plaintext []byte) []byte {
++
++ N := modulusFromNat(natFromBig(pub.N))
++ m := natFromBytes(plaintext).expandFor(N)
++
++ e := make([]byte, 8)
++ binary.BigEndian.PutUint64(e, uint64(pub.E))
++ for len(e) > 1 && e[0] == 0 {
++ e = e[1:]
++ }
++
++ out := make([]byte, modulusSize(N))
++ return new(nat).exp(m, e, N).fillBytes(out)
+ }
+
+ // EncryptOAEP encrypts the given message with RSA-OAEP.
+@@ -404,12 +416,7 @@ func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, l
+ mgf1XOR(db, hash, seed)
+ mgf1XOR(seed, hash, db)
+
+- m := new(big.Int)
+- m.SetBytes(em)
+- c := encrypt(new(big.Int), pub, m)
+-
+- out := make([]byte, k)
+- return c.FillBytes(out), nil
++ return encrypt(pub, em), nil
+ }
+
+ // ErrDecryption represents a failure to decrypt a message.
+@@ -451,98 +458,71 @@ func (priv *PrivateKey) Precompute() {
+ }
+ }
+
+-// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
+-// random source is given, RSA blinding is used.
+-func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+- // TODO(agl): can we get away with reusing blinds?
+- if c.Cmp(priv.N) > 0 {
+- err = ErrDecryption
+- return
++// decrypt performs an RSA decryption of ciphertext into out.
++func decrypt(priv *PrivateKey, ciphertext []byte) ([]byte, error) {
++
++ N := modulusFromNat(natFromBig(priv.N))
++ c := natFromBytes(ciphertext).expandFor(N)
++ if c.cmpGeq(N.nat) == 1 {
++ return nil, ErrDecryption
+ }
+ if priv.N.Sign() == 0 {
+ return nil, ErrDecryption
+ }
+
+- var ir *big.Int
+- if random != nil {
+- randutil.MaybeReadByte(random)
+-
+- // Blinding enabled. Blinding involves multiplying c by r^e.
+- // Then the decryption operation performs (m^e * r^e)^d mod n
+- // which equals mr mod n. The factor of r can then be removed
+- // by multiplying by the multiplicative inverse of r.
+-
+- var r *big.Int
+- ir = new(big.Int)
+- for {
+- r, err = rand.Int(random, priv.N)
+- if err != nil {
+- return
+- }
+- if r.Cmp(bigZero) == 0 {
+- r = bigOne
+- }
+- ok := ir.ModInverse(r, priv.N)
+- if ok != nil {
+- break
+- }
+- }
+- bigE := big.NewInt(int64(priv.E))
+- rpowe := new(big.Int).Exp(r, bigE, priv.N) // N != 0
+- cCopy := new(big.Int).Set(c)
+- cCopy.Mul(cCopy, rpowe)
+- cCopy.Mod(cCopy, priv.N)
+- c = cCopy
+- }
+-
++ // Note that because our private decryption exponents are stored as big.Int,
++ // we potentially leak the exact number of bits of these exponents. This
++ // isn't great, but should be fine.
+ if priv.Precomputed.Dp == nil {
+- m = new(big.Int).Exp(c, priv.D, priv.N)
+- } else {
+- // We have the precalculated values needed for the CRT.
+- m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
+- m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
+- m.Sub(m, m2)
+- if m.Sign() < 0 {
+- m.Add(m, priv.Primes[0])
+- }
+- m.Mul(m, priv.Precomputed.Qinv)
+- m.Mod(m, priv.Primes[0])
+- m.Mul(m, priv.Primes[1])
+- m.Add(m, m2)
+-
+- for i, values := range priv.Precomputed.CRTValues {
+- prime := priv.Primes[2+i]
+- m2.Exp(c, values.Exp, prime)
+- m2.Sub(m2, m)
+- m2.Mul(m2, values.Coeff)
+- m2.Mod(m2, prime)
+- if m2.Sign() < 0 {
+- m2.Add(m2, prime)
+- }
+- m2.Mul(m2, values.R)
+- m.Add(m, m2)
+- }
+- }
+-
+- if ir != nil {
+- // Unblind.
+- m.Mul(m, ir)
+- m.Mod(m, priv.N)
+- }
+-
+- return
++ out := make([]byte, modulusSize(N))
++ return new(nat).exp(c, priv.D.Bytes(), N).fillBytes(out), nil
++ }
++
++ t0 := new(nat)
++ P := modulusFromNat(natFromBig(priv.Primes[0]))
++ Q := modulusFromNat(natFromBig(priv.Primes[1]))
++ // m = c ^ Dp mod p
++ m := new(nat).exp(t0.mod(c, P), priv.Precomputed.Dp.Bytes(), P)
++ // m2 = c ^ Dq mod q
++ m2 := new(nat).exp(t0.mod(c, Q), priv.Precomputed.Dq.Bytes(), Q)
++ // m = m - m2 mod p
++ m.modSub(t0.mod(m2, P), P)
++ // m = m * Qinv mod p
++ m.modMul(natFromBig(priv.Precomputed.Qinv).expandFor(P), P)
++ // m = m * q mod N
++ m.expandFor(N).modMul(t0.mod(Q.nat, N), N)
++ // m = m + m2 mod N
++ m.modAdd(m2.expandFor(N), N)
++
++ for i, values := range priv.Precomputed.CRTValues {
++ p := modulusFromNat(natFromBig(priv.Primes[2+i]))
++ // m2 = c ^ Exp mod p
++ m2.exp(t0.mod(c, p), values.Exp.Bytes(), p)
++ // m2 = m2 - m mod p
++ m2.modSub(t0.mod(m, p), p)
++ // m2 = m2 * Coeff mod p
++ m2.modMul(natFromBig(values.Coeff).expandFor(p), p)
++ // m2 = m2 * R mod N
++ R := natFromBig(values.R).expandFor(N)
++ m2.expandFor(N).modMul(R, N)
++ // m = m + m2 mod N
++ m.modAdd(m2, N)
++ }
++
++ out := make([]byte, modulusSize(N))
++ return m.fillBytes(out), nil
+ }
+
+-func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
+- m, err = decrypt(random, priv, c)
++func decryptAndCheck(priv *PrivateKey, ciphertext []byte) (m []byte, err error) {
++ m, err = decrypt(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+
+ // In order to defend against errors in the CRT computation, m^e is
+ // calculated, which should match the original ciphertext.
+- check := encrypt(new(big.Int), &priv.PublicKey, m)
+- if c.Cmp(check) != 0 {
++ check := encrypt(&priv.PublicKey, m)
++ if subtle.ConstantTimeCompare(ciphertext, check) != 1 {
+ return nil, errors.New("rsa: internal error")
+ }
+ return m, nil
+@@ -554,9 +534,7 @@ func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int
+ // Encryption and decryption of a given message must use the same hash function
+ // and sha256.New() is a reasonable choice.
+ //
+-// The random parameter, if not nil, is used to blind the private-key operation
+-// and avoid timing side-channel attacks. Blinding is purely internal to this
+-// function – the random data need not match that used when encrypting.
+// The random parameter is legacy and ignored, and it can be nil.
+ //
+ // The label parameter must match the value given when encrypting. See
+ // EncryptOAEP for details.
+@@ -570,9 +548,7 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+ return nil, ErrDecryption
+ }
+
+- c := new(big.Int).SetBytes(ciphertext)
+-
+- m, err := decrypt(random, priv, c)
++ em, err := decrypt(priv, ciphertext)
+ if err != nil {
+ return nil, err
+ }
+@@ -581,10 +557,6 @@ func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext
+ lHash := hash.Sum(nil)
+ hash.Reset()
+
+- // We probably leak the number of leading zeros.
+- // It's not clear that we can do anything about this.
+- em := m.FillBytes(make([]byte, k))
+-
+ firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
+
+ seed := em[1 : hash.Size()+1]
+--
+2.25.1
+
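As a side note on the 513-bit test case above: the length mismatch the backport handles follows directly from the RFC 8017 arithmetic. A minimal standalone Go sketch of that arithmetic (illustrative only, not part of the patch):

package main

import "fmt"

func main() {
	// The case exercised by TestPSS513: a 513-bit RSA modulus.
	modBits := 513
	k := (modBits + 7) / 8    // modulus length in octets: 65
	emBits := modBits - 1     // 512
	emLen := (emBits + 7) / 8 // encoded message length: 64

	// encrypt() always produces k bytes, so VerifyPSS must strip the
	// leading zero byte(s) before calling emsaPSSVerify.
	fmt.Printf("k=%d emLen=%d leading bytes to strip=%d\n", k, emLen, k-emLen)
}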
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45289.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45289.patch
new file mode 100644
index 0000000000..13d3510504
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45289.patch
@@ -0,0 +1,121 @@
+From 20586c0dbe03d144f914155f879fa5ee287591a1 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Thu, 11 Jan 2024 11:31:57 -0800
+Subject: [PATCH] [release-branch.go1.21] net/http, net/http/cookiejar: avoid
+ subdomain matches on IPv6 zones
+
+When deciding whether to forward cookies or sensitive headers
+across a redirect, do not attempt to interpret an IPv6 address
+as a domain name.
+
+Avoids a case where a maliciously-crafted redirect to an
+IPv6 address with a scoped addressing zone could be
+misinterpreted as a within-domain redirect. For example,
+we could interpret "::1%.www.example.com" as a subdomain
+of "www.example.com".
+
+Thanks to Juho Nurminen of Mattermost for reporting this issue.
+
+Fixes CVE-2023-45289
+Fixes #65385
+For #65065
+
+Change-Id: I8f463f59f0e700c8a18733d2b264a8bcb3a19599
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2131938
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2173775
+Reviewed-by: Carlos Amedee <amedee@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/569239
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+TryBot-Bypass: Michael Knyszek <mknyszek@google.com>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/20586c0dbe03d144f914155f879fa5ee287591a1]
+CVE: CVE-2023-45289
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/net/http/client.go | 6 ++++++
+ src/net/http/client_test.go | 1 +
+ src/net/http/cookiejar/jar.go | 7 +++++++
+ src/net/http/cookiejar/jar_test.go | 10 ++++++++++
+ 4 files changed, 24 insertions(+)
+
+diff --git a/src/net/http/client.go b/src/net/http/client.go
+index a496f1c..2031834 100644
+--- a/src/net/http/client.go
++++ b/src/net/http/client.go
+@@ -973,6 +973,12 @@ func isDomainOrSubdomain(sub, parent string) bool {
+ if sub == parent {
+ return true
+ }
++ // If sub contains a :, it's probably an IPv6 address (and is definitely not a hostname).
++ // Don't check the suffix in this case, to avoid matching the contents of an IPv6 zone.
++ // For example, "::1%.www.example.com" is not a subdomain of "www.example.com".
++ if strings.ContainsAny(sub, ":%") {
++ return false
++ }
+ // If sub is "foo.example.com" and parent is "example.com",
+ // that means sub must end in "."+parent.
+ // Do it without allocating.
+diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go
+index 2b4f53f..442fe35 100644
+--- a/src/net/http/client_test.go
++++ b/src/net/http/client_test.go
+@@ -1703,6 +1703,7 @@ func TestShouldCopyHeaderOnRedirect(t *testing.T) {
+ {"cookie2", "http://foo.com/", "http://bar.com/", false},
+ {"authorization", "http://foo.com/", "http://bar.com/", false},
+ {"www-authenticate", "http://foo.com/", "http://bar.com/", false},
++ {"authorization", "http://foo.com/", "http://[::1%25.foo.com]/", false},
+
+ // But subdomains should work:
+ {"www-authenticate", "http://foo.com/", "http://foo.com/", true},
+diff --git a/src/net/http/cookiejar/jar.go b/src/net/http/cookiejar/jar.go
+index 9f19917..18cbfc2 100644
+--- a/src/net/http/cookiejar/jar.go
++++ b/src/net/http/cookiejar/jar.go
+@@ -356,6 +356,13 @@ func jarKey(host string, psl PublicSuffixList) string {
+
+ // isIP reports whether host is an IP address.
+ func isIP(host string) bool {
++ if strings.ContainsAny(host, ":%") {
++ // Probable IPv6 address.
++ // Hostnames can't contain : or %, so this is definitely not a valid host.
++ // Treating it as an IP is the more conservative option, and avoids the risk
++ // of interpreting ::1%.www.example.com as a subdomain of www.example.com.
++ return true
++ }
+ return net.ParseIP(host) != nil
+ }
+
+diff --git a/src/net/http/cookiejar/jar_test.go b/src/net/http/cookiejar/jar_test.go
+index 47fb1ab..fd8d40e 100644
+--- a/src/net/http/cookiejar/jar_test.go
++++ b/src/net/http/cookiejar/jar_test.go
+@@ -251,6 +251,7 @@ var isIPTests = map[string]bool{
+ "127.0.0.1": true,
+ "1.2.3.4": true,
+ "2001:4860:0:2001::68": true,
++ "::1%zone": true,
+ "example.com": false,
+ "1.1.1.300": false,
+ "www.foo.bar.net": false,
+@@ -613,6 +614,15 @@ var basicsTests = [...]jarTest{
+ {"http://www.host.test:1234/", "a=1"},
+ },
+ },
++ {
++ "IPv6 zone is not treated as a host.",
++ "https://example.com/",
++ []string{"a=1"},
++ "a=1",
++ []query{
++ {"https://[::1%25.example.com]:80/", ""},
++ },
++ },
+ }
+
+ func TestBasics(t *testing.T) {
+--
+2.25.1
+
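To make the behavioural change above concrete, here is a minimal standalone Go sketch of the hardened subdomain check (a simplification, not the exact net/http code; strings.HasSuffix stands in for the allocation-free suffix comparison):

package main

import (
	"fmt"
	"strings"
)

// isDomainOrSubdomain mirrors the patched logic: any host containing ':' or '%'
// is treated as a probable IPv6 address and never matches as a subdomain.
func isDomainOrSubdomain(sub, parent string) bool {
	if sub == parent {
		return true
	}
	if strings.ContainsAny(sub, ":%") {
		return false
	}
	return strings.HasSuffix(sub, "."+parent)
}

func main() {
	fmt.Println(isDomainOrSubdomain("www.example.com", "example.com"))          // true
	fmt.Println(isDomainOrSubdomain("::1%.www.example.com", "www.example.com")) // false after the fix
}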
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2023-45290.patch b/meta/recipes-devtools/go/go-1.14/CVE-2023-45290.patch
new file mode 100644
index 0000000000..ddc2f67c96
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2023-45290.patch
@@ -0,0 +1,271 @@
+From bf80213b121074f4ad9b449410a4d13bae5e9be0 Mon Sep 17 00:00:00 2001
+From: Damien Neil <dneil@google.com>
+Date: Tue, 16 Jan 2024 15:37:52 -0800
+Subject: [PATCH] [release-branch.go1.21] net/textproto, mime/multipart: avoid
+ unbounded read in MIME header
+
+mime/multipart.Reader.ReadForm allows specifying the maximum amount
+of memory that will be consumed by the form. While this limit is
+correctly applied to the parsed form data structure, it was not
+being applied to individual header lines in a form.
+
+For example, when presented with a form containing a header line
+that never ends, ReadForm will continue to read the line until it
+runs out of memory.
+
+Limit the amount of data consumed when reading a header.
+
+Fixes CVE-2023-45290
+Fixes #65389
+For #65383
+
+Change-Id: I7f9264d25752009e95f6b2c80e3d76aaf321d658
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2134435
+Reviewed-by: Roland Shoemaker <bracewell@google.com>
+Reviewed-by: Tatiana Bradley <tatianabradley@google.com>
+Reviewed-on: https://team-review.git.corp.google.com/c/golang/go-private/+/2173776
+Reviewed-by: Carlos Amedee <amedee@google.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/569240
+Auto-Submit: Michael Knyszek <mknyszek@google.com>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/bf80213b121074f4ad9b449410a4d13bae5e9be0]
+CVE: CVE-2023-45290
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/mime/multipart/formdata_test.go | 42 +++++++++++++++++++++++++
+ src/net/textproto/reader.go | 48 ++++++++++++++++++++---------
+ src/net/textproto/reader_test.go | 12 ++++++++
+ 3 files changed, 87 insertions(+), 15 deletions(-)
+
+diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go
+index c78eeb7..f729da6 100644
+--- a/src/mime/multipart/formdata_test.go
++++ b/src/mime/multipart/formdata_test.go
+@@ -421,6 +421,48 @@ func TestReadFormLimits(t *testing.T) {
+ }
+ }
+
++func TestReadFormEndlessHeaderLine(t *testing.T) {
++ for _, test := range []struct {
++ name string
++ prefix string
++ }{{
++ name: "name",
++ prefix: "X-",
++ }, {
++ name: "value",
++ prefix: "X-Header: ",
++ }, {
++ name: "continuation",
++ prefix: "X-Header: foo\r\n ",
++ }} {
++ t.Run(test.name, func(t *testing.T) {
++ const eol = "\r\n"
++ s := `--boundary` + eol
++ s += `Content-Disposition: form-data; name="a"` + eol
++ s += `Content-Type: text/plain` + eol
++ s += test.prefix
++ fr := io.MultiReader(
++ strings.NewReader(s),
++ neverendingReader('X'),
++ )
++ r := NewReader(fr, "boundary")
++ _, err := r.ReadForm(1 << 20)
++ if err != ErrMessageTooLarge {
++ t.Fatalf("ReadForm(1 << 20): %v, want ErrMessageTooLarge", err)
++ }
++ })
++ }
++}
++
++type neverendingReader byte
++
++func (r neverendingReader) Read(p []byte) (n int, err error) {
++ for i := range p {
++ p[i] = byte(r)
++ }
++ return len(p), nil
++}
++
+ func BenchmarkReadForm(b *testing.B) {
+ for _, test := range []struct {
+ name string
+diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go
+index ad2d777..cea6613 100644
+--- a/src/net/textproto/reader.go
++++ b/src/net/textproto/reader.go
+@@ -17,6 +17,10 @@ import (
+ "sync"
+ )
+
++// TODO: This should be a distinguishable error (ErrMessageTooLarge)
++// to allow mime/multipart to detect it.
++var errMessageTooLarge = errors.New("message too large")
++
+ // A Reader implements convenience methods for reading requests
+ // or responses from a text protocol network connection.
+ type Reader struct {
+@@ -38,13 +42,13 @@ func NewReader(r *bufio.Reader) *Reader {
+ // ReadLine reads a single line from r,
+ // eliding the final \n or \r\n from the returned string.
+ func (r *Reader) ReadLine() (string, error) {
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(-1)
+ return string(line), err
+ }
+
+ // ReadLineBytes is like ReadLine but returns a []byte instead of a string.
+ func (r *Reader) ReadLineBytes() ([]byte, error) {
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(-1)
+ if line != nil {
+ buf := make([]byte, len(line))
+ copy(buf, line)
+@@ -53,7 +57,10 @@ func (r *Reader) ReadLineBytes() ([]byte, error) {
+ return line, err
+ }
+
+-func (r *Reader) readLineSlice() ([]byte, error) {
++// readLineSlice reads a single line from r,
++// up to lim bytes long (or unlimited if lim is less than 0),
++// eliding the final \r or \r\n from the returned string.
++func (r *Reader) readLineSlice(lim int64) ([]byte, error) {
+ r.closeDot()
+ var line []byte
+ for {
+@@ -61,6 +68,9 @@ func (r *Reader) readLineSlice() ([]byte, error) {
+ if err != nil {
+ return nil, err
+ }
++ if lim >= 0 && int64(len(line))+int64(len(l)) > lim {
++ return nil, errMessageTooLarge
++ }
+ // Avoid the copy if the first call produced a full line.
+ if line == nil && !more {
+ return l, nil
+@@ -93,7 +103,7 @@ func (r *Reader) readLineSlice() ([]byte, error) {
+ // A line consisting of only white space is never continued.
+ //
+ func (r *Reader) ReadContinuedLine() (string, error) {
+- line, err := r.readContinuedLineSlice(noValidation)
++ line, err := r.readContinuedLineSlice(-1, noValidation)
+ return string(line), err
+ }
+
+@@ -114,7 +124,7 @@ func trim(s []byte) []byte {
+ // ReadContinuedLineBytes is like ReadContinuedLine but
+ // returns a []byte instead of a string.
+ func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
+- line, err := r.readContinuedLineSlice(noValidation)
++ line, err := r.readContinuedLineSlice(-1, noValidation)
+ if line != nil {
+ buf := make([]byte, len(line))
+ copy(buf, line)
+@@ -127,13 +137,14 @@ func (r *Reader) ReadContinuedLineBytes() ([]byte, error) {
+ // returning a byte slice with all lines. The validateFirstLine function
+ // is run on the first read line, and if it returns an error then this
+ // error is returned from readContinuedLineSlice.
+-func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([]byte, error) {
++// It reads up to lim bytes of data (or unlimited if lim is less than 0).
++func (r *Reader) readContinuedLineSlice(lim int64, validateFirstLine func([]byte) error) ([]byte, error) {
+ if validateFirstLine == nil {
+ return nil, fmt.Errorf("missing validateFirstLine func")
+ }
+
+ // Read the first line.
+- line, err := r.readLineSlice()
++ line, err := r.readLineSlice(lim)
+ if err != nil {
+ return nil, err
+ }
+@@ -161,13 +172,21 @@ func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([
+ // copy the slice into buf.
+ r.buf = append(r.buf[:0], trim(line)...)
+
++ if lim < 0 {
++ lim = math.MaxInt64
++ }
++ lim -= int64(len(r.buf))
++
+ // Read continuation lines.
+ for r.skipSpace() > 0 {
+- line, err := r.readLineSlice()
++ r.buf = append(r.buf, ' ')
++ if int64(len(r.buf)) >= lim {
++ return nil, errMessageTooLarge
++ }
++ line, err := r.readLineSlice(lim - int64(len(r.buf)))
+ if err != nil {
+ break
+ }
+- r.buf = append(r.buf, ' ')
+ r.buf = append(r.buf, trim(line)...)
+ }
+ return r.buf, nil
+@@ -512,7 +531,8 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+
+ // The first line cannot start with a leading space.
+ if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') {
+- line, err := r.readLineSlice()
++ const errorLimit = 80 // arbitrary limit on how much of the line we'll quote
++ line, err := r.readLineSlice(errorLimit)
+ if err != nil {
+ return m, err
+ }
+@@ -520,7 +540,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+ }
+
+ for {
+- kv, err := r.readContinuedLineSlice(mustHaveFieldNameColon)
++ kv, err := r.readContinuedLineSlice(maxMemory, mustHaveFieldNameColon)
+ if len(kv) == 0 {
+ return m, err
+ }
+@@ -541,7 +561,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+
+ maxHeaders--
+ if maxHeaders < 0 {
+- return nil, errors.New("message too large")
++ return nil, errMessageTooLarge
+ }
+
+ // backport 5c55ac9bf1e5f779220294c843526536605f42ab
+@@ -567,9 +587,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error)
+ }
+ maxMemory -= int64(len(value))
+ if maxMemory < 0 {
+- // TODO: This should be a distinguishable error (ErrMessageTooLarge)
+- // to allow mime/multipart to detect it.
+- return m, errors.New("message too large")
++ return m, errMessageTooLarge
+ }
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go
+index 3ae0de1..db1ed91 100644
+--- a/src/net/textproto/reader_test.go
++++ b/src/net/textproto/reader_test.go
+@@ -34,6 +34,18 @@ func TestReadLine(t *testing.T) {
+ }
+ }
+
++func TestReadLineLongLine(t *testing.T) {
++ line := strings.Repeat("12345", 10000)
++ r := reader(line + "\r\n")
++ s, err := r.ReadLine()
++ if err != nil {
++ t.Fatalf("Line 1: %v", err)
++ }
++ if s != line {
++ t.Fatalf("%v-byte line does not match expected %v-byte line", len(s), len(line))
++ }
++}
++
+ func TestReadContinuedLine(t *testing.T) {
+ r := reader("line1\nline\n 2\nline3\n")
+ s, err := r.ReadContinuedLine()
+--
+2.25.1
+
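The core mechanism of the fix above is a per-line byte limit. A rough standalone Go sketch of that idea (not the actual net/textproto code; the 1024-byte limit is an arbitrary example value):

package main

import (
	"bufio"
	"errors"
	"fmt"
	"strings"
)

var errMessageTooLarge = errors.New("message too large")

// readLineLimited accumulates one line, failing once more than lim bytes have
// been read (lim < 0 means unlimited), analogous to the patched readLineSlice.
func readLineLimited(r *bufio.Reader, lim int64) ([]byte, error) {
	var line []byte
	for {
		l, more, err := r.ReadLine()
		if err != nil {
			return nil, err
		}
		if lim >= 0 && int64(len(line))+int64(len(l)) > lim {
			return nil, errMessageTooLarge
		}
		line = append(line, l...)
		if !more {
			return line, nil
		}
	}
}

func main() {
	// A "header line" far longer than the limit no longer consumes unbounded memory.
	r := bufio.NewReader(strings.NewReader(strings.Repeat("X", 1<<20) + "\r\n"))
	_, err := readLineLimited(r, 1024)
	fmt.Println(err) // message too large
}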
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2024-24784.patch b/meta/recipes-devtools/go/go-1.14/CVE-2024-24784.patch
new file mode 100644
index 0000000000..e9d9d972b9
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2024-24784.patch
@@ -0,0 +1,205 @@
+From 5330cd225ba54c7dc78c1b46dcdf61a4671a632c Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <bracewell@google.com>
+Date: Wed, 10 Jan 2024 11:02:14 -0800
+Subject: [PATCH] [release-branch.go1.22] net/mail: properly handle special
+ characters in phrase and obs-phrase
+
+Fixes a couple of misalignments with RFC 5322 which introduce
+significant diffs between (mostly) conformant parsers.
+
+This change reverts the changes made in CL50911, which allowed certain
+special RFC 5322 characters to appear unquoted in the "phrase" syntax.
+It is unclear why this change was made in the first place, and created
+a divergence from comformant parsers. In particular this resulted in
+treating comments in display names incorrectly.
+
+Additionally properly handle trailing malformed comments in the group
+syntax.
+
+For #65083
+Fixed #65849
+
+Change-Id: I00dddc044c6ae3381154e43236632604c390f672
+Reviewed-on: https://go-review.googlesource.com/c/go/+/555596
+Reviewed-by: Damien Neil <dneil@google.com>
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-on: https://go-review.googlesource.com/c/go/+/566215
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/5330cd225ba54c7dc78c1b46dcdf61a4671a632c]
+CVE: CVE-2024-24784
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ src/net/mail/message.go | 30 +++++++++++++++------------
+ src/net/mail/message_test.go | 40 ++++++++++++++++++++++++++----------
+ 2 files changed, 46 insertions(+), 24 deletions(-)
+
+diff --git a/src/net/mail/message.go b/src/net/mail/message.go
+index af516fc30f470..fc2a9e46f811b 100644
+--- a/src/net/mail/message.go
++++ b/src/net/mail/message.go
+@@ -280,7 +280,7 @@ func (a *Address) String() string {
+ // Add quotes if needed
+ quoteLocal := false
+ for i, r := range local {
+- if isAtext(r, false, false) {
++ if isAtext(r, false) {
+ continue
+ }
+ if r == '.' {
+@@ -444,7 +444,7 @@ func (p *addrParser) parseAddress(handleGroup bool) ([]*Address, error) {
+ if !p.consume('<') {
+ atext := true
+ for _, r := range displayName {
+- if !isAtext(r, true, false) {
++ if !isAtext(r, true) {
+ atext = false
+ break
+ }
+@@ -479,7 +479,9 @@ func (p *addrParser) consumeGroupList() ([]*Address, error) {
+ // handle empty group.
+ p.skipSpace()
+ if p.consume(';') {
+- p.skipCFWS()
++ if !p.skipCFWS() {
++ return nil, errors.New("mail: misformatted parenthetical comment")
++ }
+ return group, nil
+ }
+
+@@ -496,7 +498,9 @@ func (p *addrParser) consumeGroupList() ([]*Address, error) {
+ return nil, errors.New("mail: misformatted parenthetical comment")
+ }
+ if p.consume(';') {
+- p.skipCFWS()
++ if !p.skipCFWS() {
++ return nil, errors.New("mail: misformatted parenthetical comment")
++ }
+ break
+ }
+ if !p.consume(',') {
+@@ -566,6 +570,12 @@ func (p *addrParser) consumePhrase() (phrase string, err error) {
+ var words []string
+ var isPrevEncoded bool
+ for {
++ // obs-phrase allows CFWS after one word
++ if len(words) > 0 {
++ if !p.skipCFWS() {
++ return "", errors.New("mail: misformatted parenthetical comment")
++ }
++ }
+ // word = atom / quoted-string
+ var word string
+ p.skipSpace()
+@@ -661,7 +671,6 @@ Loop:
+ // If dot is true, consumeAtom parses an RFC 5322 dot-atom instead.
+ // If permissive is true, consumeAtom will not fail on:
+ // - leading/trailing/double dots in the atom (see golang.org/issue/4938)
+-// - special characters (RFC 5322 3.2.3) except '<', '>', ':' and '"' (see golang.org/issue/21018)
+ func (p *addrParser) consumeAtom(dot bool, permissive bool) (atom string, err error) {
+ i := 0
+
+@@ -672,7 +681,7 @@ Loop:
+ case size == 1 && r == utf8.RuneError:
+ return "", fmt.Errorf("mail: invalid utf-8 in address: %q", p.s)
+
+- case size == 0 || !isAtext(r, dot, permissive):
++ case size == 0 || !isAtext(r, dot):
+ break Loop
+
+ default:
+@@ -850,18 +859,13 @@ func (e charsetError) Error() string {
+
+ // isAtext reports whether r is an RFC 5322 atext character.
+ // If dot is true, period is included.
+-// If permissive is true, RFC 5322 3.2.3 specials is included,
+-// except '<', '>', ':' and '"'.
+-func isAtext(r rune, dot, permissive bool) bool {
++func isAtext(r rune, dot bool) bool {
+ switch r {
+ case '.':
+ return dot
+
+ // RFC 5322 3.2.3. specials
+- case '(', ')', '[', ']', ';', '@', '\\', ',':
+- return permissive
+-
+- case '<', '>', '"', ':':
++ case '(', ')', '<', '>', '[', ']', ':', ';', '@', '\\', ',', '"': // RFC 5322 3.2.3. specials
+ return false
+ }
+ return isVchar(r)
+diff --git a/src/net/mail/message_test.go b/src/net/mail/message_test.go
+index 1e1bb4092f659..1f2f62afbf406 100644
+--- a/src/net/mail/message_test.go
++++ b/src/net/mail/message_test.go
+@@ -385,8 +385,11 @@ func TestAddressParsingError(t *testing.T) {
+ 13: {"group not closed: null@example.com", "expected comma"},
+ 14: {"group: first@example.com, second@example.com;", "group with multiple addresses"},
+ 15: {"john.doe", "missing '@' or angle-addr"},
+- 16: {"john.doe@", "no angle-addr"},
++ 16: {"john.doe@", "missing '@' or angle-addr"},
+ 17: {"John Doe@foo.bar", "no angle-addr"},
++ 18: {" group: null@example.com; (asd", "misformatted parenthetical comment"},
++ 19: {" group: ; (asd", "misformatted parenthetical comment"},
++ 20: {`(John) Doe <jdoe@machine.example>`, "missing word in phrase:"},
+ }
+
+ for i, tc := range mustErrTestCases {
+@@ -436,24 +439,19 @@ func TestAddressParsing(t *testing.T) {
+ Address: "john.q.public@example.com",
+ }},
+ },
+- {
+- `"John (middle) Doe" <jdoe@machine.example>`,
+- []*Address{{
+- Name: "John (middle) Doe",
+- Address: "jdoe@machine.example",
+- }},
+- },
++ // Comment in display name
+ {
+ `John (middle) Doe <jdoe@machine.example>`,
+ []*Address{{
+- Name: "John (middle) Doe",
++ Name: "John Doe",
+ Address: "jdoe@machine.example",
+ }},
+ },
++ // Display name is quoted string, so comment is not a comment
+ {
+- `John !@M@! Doe <jdoe@machine.example>`,
++ `"John (middle) Doe" <jdoe@machine.example>`,
+ []*Address{{
+- Name: "John !@M@! Doe",
++ Name: "John (middle) Doe",
+ Address: "jdoe@machine.example",
+ }},
+ },
+@@ -788,6 +786,26 @@ func TestAddressParsing(t *testing.T) {
+ },
+ },
+ },
++ // Comment in group display name
++ {
++ `group (comment:): a@example.com, b@example.com;`,
++ []*Address{
++ {
++ Address: "a@example.com",
++ },
++ {
++ Address: "b@example.com",
++ },
++ },
++ },
++ {
++ `x(:"):"@a.example;("@b.example;`,
++ []*Address{
++ {
++ Address: `@a.example;(@b.example`,
++ },
++ },
++ },
+ }
+ for _, test := range tests {
+ if len(test.exp) == 1 {
diff --git a/meta/recipes-devtools/go/go-1.14/CVE-2024-24785.patch b/meta/recipes-devtools/go/go-1.14/CVE-2024-24785.patch
new file mode 100644
index 0000000000..1398a2ca48
--- /dev/null
+++ b/meta/recipes-devtools/go/go-1.14/CVE-2024-24785.patch
@@ -0,0 +1,197 @@
+From 3643147a29352ca2894fd5d0d2069bc4b4335a7e Mon Sep 17 00:00:00 2001
+From: Roland Shoemaker <roland@golang.org>
+Date: Wed, 14 Feb 2024 17:18:36 -0800
+Subject: [PATCH] [release-branch.go1.21] html/template: escape additional
+ tokens in MarshalJSON errors
+
+Escape "</script" and "<!--" in errors returned from MarshalJSON methods
+when attempting to marshal types in script blocks. This prevents any
+user controlled content from prematurely terminating the script block.
+
+Updates #65697
+Fixes #65968
+
+Change-Id: Icf0e26c54ea7d9c1deed0bff11b6506c99ddef1b
+Reviewed-on: https://go-review.googlesource.com/c/go/+/564196
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-by: Damien Neil <dneil@google.com>
+(cherry picked from commit ccbc725f2d678255df1bd326fa511a492aa3a0aa)
+Reviewed-on: https://go-review.googlesource.com/c/go/+/567515
+Reviewed-by: Carlos Amedee <carlos@golang.org>
+
+Upstream-Status: Backport [https://github.com/golang/go/commit/3643147a29352ca2894fd5d0d2069bc4b4335a7e]
+CVE: CVE-2024-24785
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/html/template/js.go | 22 ++++++++-
+ src/html/template/js_test.go | 96 ++++++++++++++++++++----------------
+ 2 files changed, 74 insertions(+), 44 deletions(-)
+
+diff --git a/src/html/template/js.go b/src/html/template/js.go
+index 35994f0..4d3b25d 100644
+--- a/src/html/template/js.go
++++ b/src/html/template/js.go
+@@ -171,13 +171,31 @@ func jsValEscaper(args ...interface{}) string {
+ // cyclic data. This may be an unacceptable DoS risk.
+ b, err := json.Marshal(a)
+ if err != nil {
+- // Put a space before comment so that if it is flush against
++ // While the standard JSON marshaller does not include user controlled
++ // information in the error message, if a type has a MarshalJSON method,
++ // the content of the error message is not guaranteed. Since we insert
++ // the error into the template, as part of a comment, we attempt to
++ // prevent the error from either terminating the comment, or the script
++ // block itself.
++ //
++ // In particular we:
++ // * replace "*/" comment end tokens with "* /", which does not
++ // terminate the comment
++ // * replace "</script" with "\x3C/script", and "<!--" with
++ // "\x3C!--", which prevents confusing script block termination
++ // semantics
++ //
++ // We also put a space before the comment so that if it is flush against
+ // a division operator it is not turned into a line comment:
+ // x/{{y}}
+ // turning into
+ // x//* error marshaling y:
+ // second line of error message */null
+- return fmt.Sprintf(" /* %s */null ", strings.ReplaceAll(err.Error(), "*/", "* /"))
++ errStr := err.Error()
++ errStr = strings.ReplaceAll(errStr, "*/", "* /")
++ errStr = strings.ReplaceAll(errStr, "</script", `\x3C/script`)
++ errStr = strings.ReplaceAll(errStr, "<!--", `\x3C!--`)
++ return fmt.Sprintf(" /* %s */null ", errStr)
+ }
+
+ // TODO: maybe post-process output to prevent it from containing
+diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
+index de9ef28..3fc3baf 100644
+--- a/src/html/template/js_test.go
++++ b/src/html/template/js_test.go
+@@ -5,6 +5,7 @@
+ package template
+
+ import (
++ "errors"
+ "bytes"
+ "math"
+ "strings"
+@@ -104,61 +105,72 @@ func TestNextJsCtx(t *testing.T) {
+ }
+ }
+
++type jsonErrType struct{}
++
++func (e *jsonErrType) MarshalJSON() ([]byte, error) {
++ return nil, errors.New("beep */ boop </script blip <!--")
++}
++
+ func TestJSValEscaper(t *testing.T) {
+ tests := []struct {
+- x interface{}
+- js string
++ x interface{}
++ js string
++ skipNest bool
+ }{
+- {int(42), " 42 "},
+- {uint(42), " 42 "},
+- {int16(42), " 42 "},
+- {uint16(42), " 42 "},
+- {int32(-42), " -42 "},
+- {uint32(42), " 42 "},
+- {int16(-42), " -42 "},
+- {uint16(42), " 42 "},
+- {int64(-42), " -42 "},
+- {uint64(42), " 42 "},
+- {uint64(1) << 53, " 9007199254740992 "},
++ {int(42), " 42 ", false},
++ {uint(42), " 42 ", false},
++ {int16(42), " 42 ", false},
++ {uint16(42), " 42 ", false},
++ {int32(-42), " -42 ", false},
++ {uint32(42), " 42 ", false},
++ {int16(-42), " -42 ", false},
++ {uint16(42), " 42 ", false},
++ {int64(-42), " -42 ", false},
++ {uint64(42), " 42 ", false},
++ {uint64(1) << 53, " 9007199254740992 ", false},
+ // ulp(1 << 53) > 1 so this loses precision in JS
+ // but it is still a representable integer literal.
+- {uint64(1)<<53 + 1, " 9007199254740993 "},
+- {float32(1.0), " 1 "},
+- {float32(-1.0), " -1 "},
+- {float32(0.5), " 0.5 "},
+- {float32(-0.5), " -0.5 "},
+- {float32(1.0) / float32(256), " 0.00390625 "},
+- {float32(0), " 0 "},
+- {math.Copysign(0, -1), " -0 "},
+- {float64(1.0), " 1 "},
+- {float64(-1.0), " -1 "},
+- {float64(0.5), " 0.5 "},
+- {float64(-0.5), " -0.5 "},
+- {float64(0), " 0 "},
+- {math.Copysign(0, -1), " -0 "},
+- {"", `""`},
+- {"foo", `"foo"`},
++ {uint64(1)<<53 + 1, " 9007199254740993 ", false},
++ {float32(1.0), " 1 ", false},
++ {float32(-1.0), " -1 ", false},
++ {float32(0.5), " 0.5 ", false},
++ {float32(-0.5), " -0.5 ", false},
++ {float32(1.0) / float32(256), " 0.00390625 ", false},
++ {float32(0), " 0 ", false},
++ {math.Copysign(0, -1), " -0 ", false},
++ {float64(1.0), " 1 ", false},
++ {float64(-1.0), " -1 ", false},
++ {float64(0.5), " 0.5 ", false},
++ {float64(-0.5), " -0.5 ", false},
++ {float64(0), " 0 ", false},
++ {math.Copysign(0, -1), " -0 ", false},
++ {"", `""`, false},
++ {"foo", `"foo"`, false},
+ // Newlines.
+- {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
++ {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`, false},
+ // "\v" == "v" on IE 6 so use "\u000b" instead.
+- {"\t\x0b", `"\t\u000b"`},
+- {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
+- {[]interface{}{}, "[]"},
+- {[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
+- {[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`},
+- {"<!--", `"\u003c!--"`},
+- {"-->", `"--\u003e"`},
+- {"<![CDATA[", `"\u003c![CDATA["`},
+- {"]]>", `"]]\u003e"`},
+- {"</script", `"\u003c/script"`},
+- {"\U0001D11E", "\"\U0001D11E\""}, // or "\uD834\uDD1E"
+- {nil, " null "},
++ {"\t\x0b", `"\t\u000b"`, false},
++ {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`, false},
++ {[]interface{}{}, "[]", false},
++ {[]interface{}{42, "foo", nil}, `[42,"foo",null]`, false},
++ {[]string{"<!--", "</script>", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`, false},
++ {"<!--", `"\u003c!--"`, false},
++ {"-->", `"--\u003e"`, false},
++ {"<![CDATA[", `"\u003c![CDATA["`, false},
++ {"]]>", `"]]\u003e"`, false},
++ {"</script", `"\u003c/script"`, false},
++ {"\U0001D11E", "\"\U0001D11E\"", false}, // or "\uD834\uDD1E"
++ {nil, " null ", false},
++ {&jsonErrType{}, " /* json: error calling MarshalJSON for type *template.jsonErrType: beep * / boop \\x3C/script blip \\x3C!-- */null ", true},
+ }
+
+ for _, test := range tests {
+ if js := jsValEscaper(test.x); js != test.js {
+ t.Errorf("%+v: want\n\t%q\ngot\n\t%q", test.x, test.js, js)
+ }
++ if test.skipNest {
++ continue
++ }
+ // Make sure that escaping corner cases are not broken
+ // by nesting.
+ a := []interface{}{test.x}
+--
+2.25.1
+
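A minimal standalone Go sketch of the escaping applied above, reusing the error string from the new test (illustrative only; the helper name is made up):

package main

import (
	"fmt"
	"strings"
)

// sanitizeMarshalError mimics the patched jsValEscaper error path: the error
// text is defanged so it cannot terminate the /* ... */ comment or the
// enclosing script block.
func sanitizeMarshalError(errStr string) string {
	errStr = strings.ReplaceAll(errStr, "*/", "* /")
	errStr = strings.ReplaceAll(errStr, "</script", `\x3C/script`)
	errStr = strings.ReplaceAll(errStr, "<!--", `\x3C!--`)
	return fmt.Sprintf(" /* %s */null ", errStr)
}

func main() {
	fmt.Println(sanitizeMarshalError("beep */ boop </script blip <!--"))
	// Prints:  /* beep * / boop \x3C/script blip \x3C!-- */null
}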
diff --git a/meta/recipes-devtools/go/go-crosssdk.inc b/meta/recipes-devtools/go/go-crosssdk.inc
index f0bec79719..36c9b12af8 100644
--- a/meta/recipes-devtools/go/go-crosssdk.inc
+++ b/meta/recipes-devtools/go/go-crosssdk.inc
@@ -4,6 +4,8 @@ DEPENDS = "go-native virtual/${TARGET_PREFIX}gcc-crosssdk virtual/nativesdk-${TA
PN = "go-crosssdk-${SDK_SYS}"
PROVIDES = "virtual/${TARGET_PREFIX}go-crosssdk"
+export GOCACHE = "${B}/.cache"
+
do_configure[noexec] = "1"
do_compile() {
diff --git a/meta/recipes-devtools/go/go_1.14.bb b/meta/recipes-devtools/go/go_1.14.bb
index c17527998b..76ff788238 100644
--- a/meta/recipes-devtools/go/go_1.14.bb
+++ b/meta/recipes-devtools/go/go_1.14.bb
@@ -7,8 +7,8 @@ export CGO_ENABLED_riscv64 = ""
# windows/mips/riscv doesn't support -buildmode=pie, so skip the QA checking
# for windows/mips/riscv and their variants.
python() {
- if 'mips' in d.getVar('TARGET_ARCH',True) or 'riscv' in d.getVar('TARGET_ARCH',True) or 'windows' in d.getVar('TARGET_GOOS', True):
- d.appendVar('INSANE_SKIP_%s' % d.getVar('PN',True), " textrel")
+ if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH') or 'windows' in d.getVar('TARGET_GOOS'):
+ d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
else:
d.setVar('GOBUILDMODE', 'pie')
}
diff --git a/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch b/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch
index f788e0fd43..9f4c8dc0bd 100644
--- a/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch
+++ b/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch
@@ -1,4 +1,4 @@
-From bb4e42ad3a0cdd23a1d1797e6299c76b474867c0 Mon Sep 17 00:00:00 2001
+From 81d6519499dcfebe7d21e65e002a8885a4e8d852 Mon Sep 17 00:00:00 2001
From: Joshua Watt <JPEWhacker@gmail.com>
Date: Tue, 19 Nov 2019 13:12:17 -0600
Subject: [PATCH] Add --debug-prefix-map option
@@ -11,7 +11,7 @@ Upstream-Status: Submitted [https://bugzilla.nasm.us/show_bug.cgi?id=3392635]
Signed-off-by: Joshua Watt <JPEWhacker@gmail.com>
---
- asm/nasm.c | 26 +++++++++++++++++++++++++-
+ asm/nasm.c | 24 ++++++++++++++++++++++++
include/nasmlib.h | 9 +++++++++
nasm.txt | 4 ++++
nasmlib/filename.c | 20 ++++++++++++++++++++
@@ -23,34 +23,32 @@ Signed-off-by: Joshua Watt <JPEWhacker@gmail.com>
stdlib/strlcat.c | 2 +-
test/elfdebugprefix.asm | 6 ++++++
test/performtest.pl | 12 ++++++++++--
- 12 files changed, 83 insertions(+), 10 deletions(-)
+ 12 files changed, 82 insertions(+), 9 deletions(-)
create mode 100644 test/elfdebugprefix.asm
diff --git a/asm/nasm.c b/asm/nasm.c
-index a0e1719..fc6c62e 100644
+index e5ae89a..7a7f8b4 100644
--- a/asm/nasm.c
+++ b/asm/nasm.c
-@@ -938,7 +938,8 @@ enum text_options {
- OPT_LIMIT,
+@@ -939,6 +939,7 @@ enum text_options {
OPT_KEEP_ALL,
OPT_NO_LINE,
-- OPT_DEBUG
-+ OPT_DEBUG,
-+ OPT_DEBUG_PREFIX_MAP
+ OPT_DEBUG,
++ OPT_DEBUG_PREFIX_MAP,
+ OPT_REPRODUCIBLE
};
enum need_arg {
- ARG_NO,
-@@ -970,6 +971,7 @@ static const struct textargs textopts[] = {
+@@ -971,6 +972,7 @@ static const struct textargs textopts[] = {
{"keep-all", OPT_KEEP_ALL, ARG_NO, 0},
{"no-line", OPT_NO_LINE, ARG_NO, 0},
{"debug", OPT_DEBUG, ARG_MAYBE, 0},
+ {"debug-prefix-map", OPT_DEBUG_PREFIX_MAP, true, 0},
+ {"reproducible", OPT_REPRODUCIBLE, ARG_NO, 0},
{NULL, OPT_BOGUS, ARG_NO, 0}
};
-
-@@ -1332,6 +1334,26 @@ static bool process_arg(char *p, char *q, int pass)
- case OPT_DEBUG:
- debug_nasm = param ? strtoul(param, NULL, 10) : debug_nasm+1;
+@@ -1337,6 +1339,26 @@ static bool process_arg(char *p, char *q, int pass)
+ case OPT_REPRODUCIBLE:
+ reproducible = true;
break;
+ case OPT_DEBUG_PREFIX_MAP: {
+ struct debug_prefix_list *d;
@@ -75,7 +73,7 @@ index a0e1719..fc6c62e 100644
case OPT_HELP:
help(stdout);
exit(0);
-@@ -2297,6 +2319,8 @@ static void help(FILE *out)
+@@ -2304,6 +2326,8 @@ static void help(FILE *out)
" -w-x disable warning x (also -Wno-x)\n"
" -w[+-]error promote all warnings to errors (also -Werror)\n"
" -w[+-]error=x promote warning x to errors (also -Werror=x)\n"
@@ -85,7 +83,7 @@ index a0e1719..fc6c62e 100644
fprintf(out, " %-20s %s\n",
diff --git a/include/nasmlib.h b/include/nasmlib.h
-index e9bfbcc..98fc653 100644
+index 438178d..4c3e90d 100644
--- a/include/nasmlib.h
+++ b/include/nasmlib.h
@@ -250,10 +250,19 @@ int64_t readstrnum(char *str, int length, bool *warn);
@@ -181,10 +179,10 @@ index 54b22f8..c4a412c 100644
static void as86_cleanup(void)
diff --git a/output/outcoff.c b/output/outcoff.c
-index bcd9ff3..15bfcf3 100644
+index 58fa024..14baf7b 100644
--- a/output/outcoff.c
+++ b/output/outcoff.c
-@@ -1095,14 +1095,14 @@ static void coff_symbol(char *name, int32_t strpos, int32_t value,
+@@ -1072,14 +1072,14 @@ static void coff_symbol(char *name, int32_t strpos, int32_t value,
static void coff_write_symbols(void)
{
@@ -215,7 +213,7 @@ index 61af020..1292958 100644
nsects = sectlen = 0;
syms = saa_init((int32_t)sizeof(struct elf_symbol));
diff --git a/output/outieee.c b/output/outieee.c
-index 4cc0f0f..2468724 100644
+index 6d6d4b2..cdb8333 100644
--- a/output/outieee.c
+++ b/output/outieee.c
@@ -207,7 +207,7 @@ static void ieee_unqualified_name(char *, char *);
@@ -228,10 +226,10 @@ index 4cc0f0f..2468724 100644
fpubhead = NULL;
fpubtail = &fpubhead;
diff --git a/output/outobj.c b/output/outobj.c
-index 0d4d311..d8dd6a0 100644
+index 56b43f9..fefea94 100644
--- a/output/outobj.c
+++ b/output/outobj.c
-@@ -638,7 +638,7 @@ static enum directive_result obj_directive(enum directive, char *);
+@@ -644,7 +644,7 @@ static enum directive_result obj_directive(enum directive, char *);
static void obj_init(void)
{
diff --git a/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch b/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch
new file mode 100644
index 0000000000..1bd49c9fd9
--- /dev/null
+++ b/meta/recipes-devtools/nasm/nasm/CVE-2022-44370.patch
@@ -0,0 +1,104 @@
+From b37677f7e40276bd8f504584bcba2c092f1146a8 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Mon, 7 Nov 2022 10:26:03 -0800
+Subject: [PATCH] quote_for_pmake: fix counter underrun resulting in segfault
+
+while (nbs--) { ... } ends with nbs == -1. Rather than a minimal fix,
+introduce mempset() to make these kinds of errors less likely in the
+future.
+
+Fixes: https://bugzilla.nasm.us/show_bug.cgi?id=3392815
+Reported-by: <13579and24680@gmail.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+
+Upstream-Status: Backport
+CVE: CVE-2022-44370
+
+Reference to upstream patch:
+[https://github.com/netwide-assembler/nasm/commit/2d4e6952417ec6f08b6f135d2b5d0e19b7dae30d]
+
+Signed-off-by: Archana Polampalli <archana.polampalli@windriver.com>
+---
+ asm/nasm.c | 12 +++++-------
+ configure.ac | 1 +
+ include/compiler.h | 7 +++++++
+ 3 files changed, 13 insertions(+), 7 deletions(-)
+
+diff --git a/asm/nasm.c b/asm/nasm.c
+index 7a7f8b4..675cff4 100644
+--- a/asm/nasm.c
++++ b/asm/nasm.c
+@@ -1,6 +1,6 @@
+ /* ----------------------------------------------------------------------- *
+ *
+- * Copyright 1996-2020 The NASM Authors - All Rights Reserved
++ * Copyright 1996-2022 The NASM Authors - All Rights Reserved
+ * See the file AUTHORS included with the NASM distribution for
+ * the specific copyright holders.
+ *
+@@ -814,8 +814,7 @@ static char *quote_for_pmake(const char *str)
+ }
+
+ /* Convert N backslashes at the end of filename to 2N backslashes */
+- if (nbs)
+- n += nbs;
++ n += nbs;
+
+ os = q = nasm_malloc(n);
+
+@@ -824,10 +823,10 @@ static char *quote_for_pmake(const char *str)
+ switch (*p) {
+ case ' ':
+ case '\t':
+- while (nbs--)
+- *q++ = '\\';
++ q = mempset(q, '\\', nbs);
+ *q++ = '\\';
+ *q++ = *p;
++ nbs = 0;
+ break;
+ case '$':
+ *q++ = *p;
+@@ -849,9 +848,8 @@ static char *quote_for_pmake(const char *str)
+ break;
+ }
+ }
+- while (nbs--)
+- *q++ = '\\';
+
++ q = mempset(q, '\\', nbs);
+ *q = '\0';
+
+ return os;
+diff --git a/configure.ac b/configure.ac
+index 39680b1..940ebe2 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -199,6 +199,7 @@ AC_CHECK_FUNCS(strrchrnul)
+ AC_CHECK_FUNCS(iscntrl)
+ AC_CHECK_FUNCS(isascii)
+ AC_CHECK_FUNCS(mempcpy)
++AC_CHECK_FUNCS(mempset)
+
+ AC_CHECK_FUNCS(getuid)
+ AC_CHECK_FUNCS(getgid)
+diff --git a/include/compiler.h b/include/compiler.h
+index db3d6d6..b64da6a 100644
+--- a/include/compiler.h
++++ b/include/compiler.h
+@@ -256,6 +256,13 @@ static inline void *mempcpy(void *dst, const void *src, size_t n)
+ }
+ #endif
+
++#ifndef HAVE_MEMPSET
++static inline void *mempset(void *dst, int c, size_t n)
++{
++ return (char *)memset(dst, c, n) + n;
++}
++#endif
++
+ /*
+ * Hack to support external-linkage inline functions
+ */
+--
+2.40.0
diff --git a/meta/recipes-devtools/nasm/nasm_2.15.03.bb b/meta/recipes-devtools/nasm/nasm_2.15.05.bb
index fc7046244a..c5638debdd 100644
--- a/meta/recipes-devtools/nasm/nasm_2.15.03.bb
+++ b/meta/recipes-devtools/nasm/nasm_2.15.05.bb
@@ -8,13 +8,14 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe"
SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \
file://0001-stdlib-Add-strlcat.patch \
file://0002-Add-debug-prefix-map-option.patch \
+ file://CVE-2022-44370.patch \
"
-SRC_URI[sha256sum] = "04e7343d9bf112bffa9fda86f6c7c8b120c2ccd700b882e2db9f57484b1bd778"
+SRC_URI[sha256sum] = "3c4b8339e5ab54b1bcb2316101f8985a5da50a3f9e504d43fa6f35668bee2fd0"
EXTRA_AUTORECONF_append = " -I autoconf/m4"
-inherit autotools
+inherit autotools-brokensep
BBCLASSEXTEND = "native"
diff --git a/meta/recipes-devtools/ninja/ninja_1.10.0.bb b/meta/recipes-devtools/ninja/ninja_1.10.0.bb
index ae3f3f1ea8..755b73a173 100644
--- a/meta/recipes-devtools/ninja/ninja_1.10.0.bb
+++ b/meta/recipes-devtools/ninja/ninja_1.10.0.bb
@@ -29,3 +29,6 @@ do_install() {
}
BBCLASSEXTEND = "native nativesdk"
+
+# This is a different Ninja
+CVE_CHECK_WHITELIST += "CVE-2021-4336"
diff --git a/meta/recipes-devtools/opkg/opkg/0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch b/meta/recipes-devtools/opkg/opkg/0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch
new file mode 100644
index 0000000000..bec21e67f4
--- /dev/null
+++ b/meta/recipes-devtools/opkg/opkg/0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch
@@ -0,0 +1,50 @@
+From 8b45a3c4cab95382beea1ecdddeb2e4a9ed14aba Mon Sep 17 00:00:00 2001
+From: Jo-Philipp Wich <jo@mein.io>
+Date: Wed, 1 Apr 2020 21:47:40 +0200
+Subject: [PATCH 001/104] file_util.c: fix possible bad memory access in
+ file_read_line_alloc()
+
+In the case of a zero length string being returned by fgets(), the condition
+checking for a trailing new line would perform a bad memory access outside
+of `buf`. This might happen when a line with a leading null byte is read.
+
+Avoid this case by checking that the string has a length of at least one
+byte. Also change the unsigned int types to size_t to store length values
+while we're at it.
+
+Upstream-Status: Backport [https://github.com/ndmsystems/opkg/commit/8b45a3c4cab95382beea1ecdddeb2e4a9ed14aba]
+
+Signed-off-by: Jo-Philipp Wich <jo@mein.io>
+Signed-off-by: Alejandro del Castillo <alejandro.delcastillo@ni.com>
+Signed-off-by: virendra thakur <virendrak@kpit.com>
+---
+ libopkg/file_util.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/libopkg/file_util.c b/libopkg/file_util.c
+index fbed7b4..ee9f59d 100644
+--- a/libopkg/file_util.c
++++ b/libopkg/file_util.c
+@@ -127,17 +127,14 @@ char *file_readlink_alloc(const char *file_name)
+ */
+ char *file_read_line_alloc(FILE * fp)
+ {
++ size_t buf_len, line_size;
+ char buf[BUFSIZ];
+- unsigned int buf_len;
+ char *line = NULL;
+- unsigned int line_size = 0;
+ int got_nl = 0;
+
+- buf[0] = '\0';
+-
+ while (fgets(buf, BUFSIZ, fp)) {
+ buf_len = strlen(buf);
+- if (buf[buf_len - 1] == '\n') {
++ if (buf_len > 0 && buf[buf_len - 1] == '\n') {
+ buf_len--;
+ buf[buf_len] = '\0';
+ got_nl = 1;
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/opkg/opkg_0.4.2.bb b/meta/recipes-devtools/opkg/opkg_0.4.2.bb
index a813f7258b..3ebc27c8ee 100644
--- a/meta/recipes-devtools/opkg/opkg_0.4.2.bb
+++ b/meta/recipes-devtools/opkg/opkg_0.4.2.bb
@@ -16,6 +16,7 @@ SRC_URI = "http://downloads.yoctoproject.org/releases/${BPN}/${BPN}-${PV}.tar.gz
file://opkg.conf \
file://0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch \
file://sourcedateepoch.patch \
+ file://0001-file_util.c-fix-possible-bad-memory-access-in-file_r.patch \
file://run-ptest \
"
@@ -50,7 +51,9 @@ EXTRA_OECONF_class-native = "--localstatedir=/${@os.path.relpath('${localstatedi
do_install_append () {
install -d ${D}${sysconfdir}/opkg
install -m 0644 ${WORKDIR}/opkg.conf ${D}${sysconfdir}/opkg/opkg.conf
- echo "option lists_dir ${OPKGLIBDIR}/opkg/lists" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option lists_dir ${OPKGLIBDIR}/opkg/lists" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option info_dir ${OPKGLIBDIR}/opkg/info" >>${D}${sysconfdir}/opkg/opkg.conf
+ echo "option status_file ${OPKGLIBDIR}/opkg/status" >>${D}${sysconfdir}/opkg/opkg.conf
# We need to create the lock directory
install -d ${D}${OPKGLIBDIR}/opkg
diff --git a/meta/recipes-devtools/perl/files/CVE-2023-31484.patch b/meta/recipes-devtools/perl/files/CVE-2023-31484.patch
new file mode 100644
index 0000000000..0fea7bf8a8
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2023-31484.patch
@@ -0,0 +1,27 @@
+CVE: CVE-2023-31484
+Upstream-Status: Backport [ import from Ubuntu perl_5.30.0-9ubuntu0.5
+upstream https://github.com/andk/cpanpm/commit/9c98370287f4e709924aee7c58ef21c85289a7f0 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 9c98370287f4e709924aee7c58ef21c85289a7f0 Mon Sep 17 00:00:00 2001
+From: Stig Palmquist <git@stig.io>
+Date: Tue, 28 Feb 2023 11:54:06 +0100
+Subject: [PATCH] Add verify_SSL=>1 to HTTP::Tiny to verify https server
+ identity
+
+---
+ lib/CPAN/HTTP/Client.pm | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/cpan/CPAN/lib/CPAN/HTTP/Client.pm b/cpan/CPAN/lib/CPAN/HTTP/Client.pm
+index 4fc792c26..a616fee20 100644
+--- a/cpan/CPAN/lib/CPAN/HTTP/Client.pm
++++ b/cpan/CPAN/lib/CPAN/HTTP/Client.pm
+@@ -32,6 +32,7 @@ sub mirror {
+
+ my $want_proxy = $self->_want_proxy($uri);
+ my $http = HTTP::Tiny->new(
++ verify_SSL => 1,
+ $want_proxy ? (proxy => $self->{proxy}) : ()
+ );
+
diff --git a/meta/recipes-devtools/perl/files/CVE-2023-47038.patch b/meta/recipes-devtools/perl/files/CVE-2023-47038.patch
new file mode 100644
index 0000000000..59252c560c
--- /dev/null
+++ b/meta/recipes-devtools/perl/files/CVE-2023-47038.patch
@@ -0,0 +1,121 @@
+As per https://ubuntu.com/security/CVE-2023-47100, CVE-2023-47100 is a duplicate of CVE-2023-47038.
+CVE: CVE-2023-47038 CVE-2023-47100
+Upstream-Status: Backport [ import from ubuntu perl_5.30.0-9ubuntu0.5
+upstream https://github.com/Perl/perl5/commit/12c313ce49b36160a7ca2e9b07ad5bd92ee4a010 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+Backport of:
+
+From 12c313ce49b36160a7ca2e9b07ad5bd92ee4a010 Mon Sep 17 00:00:00 2001
+From: Karl Williamson <khw@cpan.org>
+Date: Sat, 9 Sep 2023 11:59:09 -0600
+Subject: [PATCH 1/2] Fix read/write past buffer end: perl-security#140
+
+A package name may be specified in a \p{...} regular expression
+construct. If unspecified, "utf8::" is assumed, which is the package
+all official Unicode properties are in. By specifying a different
+package, one can create a user-defined property with the same
+unqualified name as a Unicode one. Such a property is defined by a sub
+whose name begins with "Is" or "In", and if the sub wishes to refer to
+an official Unicode property, it must explicitly specify the "utf8::".
+S_parse_uniprop_string() is used to parse the interior of both \p{} and
+the user-defined sub lines.
+
+In S_parse_uniprop_string(), it parses the input "name" parameter,
+creating a modified copy, "lookup_name", malloc'ed with the same size as
+"name". The modifications are essentially to create a canonicalized
+version of the input, with such things as extraneous white-space
+stripped off. I found it convenient to strip off the package specifier
+"utf8::". To to so, the code simply pretends "lookup_name" begins just
+after the "utf8::", and adjusts various other values to compensate.
+However, it missed the adjustment of one required one.
+
+This is only a problem when the property name begins with "perl" and
+isn't "perlspace" nor "perlword". All such ones are undocumented
+internal properties.
+
+What happens in this case is that the input is reparsed with slightly
+different rules in effect as to what is legal versus illegal. The
+problem is that "lookup_name" no longer is pointing to its initial
+value, but "name" is. Thus the space allocated for filling "lookup_name"
+is now shorter than "name", and as this shortened "lookup_name" is
+filled by copying suitable portions of "name", the write can be to
+unallocated space.
+
+The solution is to skip the "utf8::" when reparsing "name". Then both
+"lookup_name" and "name" are effectively shortened by the same amount,
+and there is no going off the end.
+
+This commit also does white-space adjustment so that things align
+vertically for readability.
+
+This can be easily backported to earlier Perl releases.
+---
+ regcomp.c | 17 +++++++++++------
+ t/re/pat_advanced.t | 8 ++++++++
+ 2 files changed, 19 insertions(+), 6 deletions(-)
+
+--- a/regcomp.c
++++ b/regcomp.c
+@@ -22606,7 +22606,7 @@ Perl_parse_uniprop_string(pTHX_
+ * compile perl to know about them) */
+ bool is_nv_type = FALSE;
+
+- unsigned int i, j = 0;
++ unsigned int i = 0, i_zero = 0, j = 0;
+ int equals_pos = -1; /* Where the '=' is found, or negative if none */
+ int slash_pos = -1; /* Where the '/' is found, or negative if none */
+ int table_index = 0; /* The entry number for this property in the table
+@@ -22717,9 +22717,13 @@ Perl_parse_uniprop_string(pTHX_
+ * all of them are considered to be for that package. For the purposes of
+ * parsing the rest of the property, strip it off */
+ if (non_pkg_begin == STRLENs("utf8::") && memBEGINPs(name, name_len, "utf8::")) {
+- lookup_name += STRLENs("utf8::");
+- j -= STRLENs("utf8::");
+- equals_pos -= STRLENs("utf8::");
++ lookup_name += STRLENs("utf8::");
++ j -= STRLENs("utf8::");
++ equals_pos -= STRLENs("utf8::");
++ i_zero = STRLENs("utf8::"); /* When resetting 'i' to reparse
++ from the beginning, it has to be
++ set past what we're stripping
++ off */
+ }
+
+ /* Here, we are either done with the whole property name, if it was simple;
+@@ -22997,7 +23001,8 @@ Perl_parse_uniprop_string(pTHX_
+
+ /* We set the inputs back to 0 and the code below will reparse,
+ * using strict */
+- i = j = 0;
++ i = i_zero;
++ j = 0;
+ }
+ }
+
+@@ -23018,7 +23023,7 @@ Perl_parse_uniprop_string(pTHX_
+ * separates two digits */
+ if (cur == '_') {
+ if ( stricter
+- && ( i == 0 || (int) i == equals_pos || i == name_len- 1
++ && ( i == i_zero || (int) i == equals_pos || i == name_len- 1
+ || ! isDIGIT_A(name[i-1]) || ! isDIGIT_A(name[i+1])))
+ {
+ lookup_name[j++] = '_';
+--- a/t/re/pat_advanced.t
++++ b/t/re/pat_advanced.t
+@@ -2524,6 +2524,14 @@ EOF
+ "", {}, "*COMMIT caused positioning beyond EOS");
+ }
+
++ { # perl-security#140, read/write past buffer end
++ fresh_perl_like('qr/\p{utf8::perl x}/',
++ qr/Illegal user-defined property name "utf8::perl x" in regex/,
++ {}, "perl-security#140");
++ fresh_perl_is('qr/\p{utf8::_perl_surrogate}/', "",
++ {}, "perl-security#140");
++ }
++
+
+ # !!! NOTE that tests that aren't at all likely to crash perl should go
+ # a ways above, above these last ones. There's a comment there that, like
diff --git a/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb b/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
index a6fd7b1c07..c91b44cd6e 100644
--- a/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
+++ b/meta/recipes-devtools/perl/libmodule-build-perl_0.4231.bb
@@ -37,6 +37,7 @@ EXTRA_CPAN_BUILD_FLAGS = "--create_packlist=0"
do_install_append () {
rm -rf ${D}${docdir}/perl/html
+ sed -i "s:^#!.*:#!/usr/bin/env perl:" ${D}${bindir}/config_data
}
do_install_ptest() {
diff --git a/meta/recipes-devtools/perl/perl_5.30.1.bb b/meta/recipes-devtools/perl/perl_5.30.1.bb
index 9bb94e7caa..bf81a023b8 100644
--- a/meta/recipes-devtools/perl/perl_5.30.1.bb
+++ b/meta/recipes-devtools/perl/perl_5.30.1.bb
@@ -29,6 +29,8 @@ SRC_URI = "https://www.cpan.org/src/5.0/perl-${PV}.tar.gz;name=perl \
file://CVE-2020-10878_1.patch \
file://CVE-2020-10878_2.patch \
file://CVE-2020-12723.patch \
+ file://CVE-2023-31484.patch \
+ file://CVE-2023-47038.patch \
"
SRC_URI_append_class-native = " \
file://perl-configpm-switch.patch \
@@ -44,6 +46,10 @@ SRC_URI[perl-cross.sha256sum] = "edce0b0c2f725e2db3f203d6d8e9f3f7161256f5d159055
S = "${WORKDIR}/perl-${PV}"
+# This is a Windows-only issue.
+# https://ubuntu.com/security/CVE-2023-47039
+CVE_CHECK_WHITELIST += "CVE-2023-47039"
+
inherit upstream-version-is-even update-alternatives
DEPENDS += "zlib virtual/crypt"
diff --git a/meta/recipes-devtools/python/python-setuptools.inc b/meta/recipes-devtools/python/python-setuptools.inc
index 29be852f66..5faf62bc3a 100644
--- a/meta/recipes-devtools/python/python-setuptools.inc
+++ b/meta/recipes-devtools/python/python-setuptools.inc
@@ -8,6 +8,8 @@ PYPI_PACKAGE_EXT = "zip"
inherit pypi
+SRC_URI += " file://CVE-2022-40897.patch "
+
SRC_URI_append_class-native = " file://0001-conditionally-do-not-fetch-code-by-easy_install.patch"
SRC_URI[md5sum] = "0c956eea142af9c2b02d72e3c042af30"
diff --git a/meta/recipes-devtools/python/python3-pip/CVE-2021-3572.patch b/meta/recipes-devtools/python/python3-pip/CVE-2021-3572.patch
new file mode 100644
index 0000000000..a38ab57bc6
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-pip/CVE-2021-3572.patch
@@ -0,0 +1,48 @@
+From c4fd13410b9a219f77fc30775d4a0ac9f69725bd Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 16 Jun 2022 09:52:43 +0530
+Subject: [PATCH] CVE-2021-3572
+
+Upstream-Status: Backport [https://github.com/pypa/pip/commit/e46bdda9711392fec0c45c1175bae6db847cb30b]
+CVE: CVE-2021-3572
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ news/9827.bugfix.rst | 3 +++
+ src/pip/_internal/vcs/git.py | 10 ++++++++--
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+ create mode 100644 news/9827.bugfix.rst
+
+diff --git a/news/9827.bugfix.rst b/news/9827.bugfix.rst
+new file mode 100644
+index 0000000..e0d27c3
+--- /dev/null
++++ b/news/9827.bugfix.rst
+@@ -0,0 +1,3 @@
++**SECURITY**: Stop splitting on unicode separators in git references,
++which could be maliciously used to install a different revision on the
++repository.
+diff --git a/src/pip/_internal/vcs/git.py b/src/pip/_internal/vcs/git.py
+index 7483303..1b895f6 100644
+--- a/src/pip/_internal/vcs/git.py
++++ b/src/pip/_internal/vcs/git.py
+@@ -137,9 +137,15 @@ class Git(VersionControl):
+ output = cls.run_command(['show-ref', rev], cwd=dest,
+ show_stdout=False, on_returncode='ignore')
+ refs = {}
+- for line in output.strip().splitlines():
++ # NOTE: We do not use splitlines here since that would split on other
++ # unicode separators, which can be maliciously used to install a
++ # different revision.
++ for line in output.strip().split("\n"):
++ line = line.rstrip("\r")
++ if not line:
++ continue
+ try:
+- sha, ref = line.split()
++ ref_sha, ref_name = line.split(" ", maxsplit=2)
+ except ValueError:
+ # Include the offending line to simplify troubleshooting if
+ # this error ever occurs.
+--
+2.25.1
+
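
The backported pip change above swaps splitlines() for an explicit split("\n") when parsing `git show-ref` output. A minimal, illustrative Python sketch (not part of the patch; the hashes and ref names below are made up) of the behaviour it guards against:

    # str.splitlines() also breaks on Unicode separators such as U+2028,
    # so a crafted ref name can smuggle in an extra "line"; split("\n")
    # only breaks on real newlines.
    output = ("1111111111111111111111111111111111111111 refs/heads/main\u2028"
              "2222222222222222222222222222222222222222 refs/tags/evil")

    print(output.splitlines())   # two entries -> the crafted ref becomes a separate line
    print(output.split("\n"))    # one entry   -> the crafted separator stays inline
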
diff --git a/meta/recipes-devtools/python/python3-pip_20.0.2.bb b/meta/recipes-devtools/python/python3-pip_20.0.2.bb
index 08738fb2f9..e24c6f4477 100644
--- a/meta/recipes-devtools/python/python3-pip_20.0.2.bb
+++ b/meta/recipes-devtools/python/python3-pip_20.0.2.bb
@@ -6,6 +6,7 @@ LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=8ba06d529c955048e5ddd7c45459eb2e"
DEPENDS += "python3 python3-setuptools-native"
+SRC_URI = "file://CVE-2021-3572.patch "
SRC_URI[md5sum] = "7d42ba49b809604f0df3d55df1c3fd86"
SRC_URI[sha256sum] = "7db0c8ea4c7ea51c8049640e8e6e7fde949de672bfa4949920675563a5a6967f"
diff --git a/meta/recipes-devtools/python/python3-setuptools/CVE-2022-40897.patch b/meta/recipes-devtools/python/python3-setuptools/CVE-2022-40897.patch
new file mode 100644
index 0000000000..9150cea07e
--- /dev/null
+++ b/meta/recipes-devtools/python/python3-setuptools/CVE-2022-40897.patch
@@ -0,0 +1,29 @@
+From 43a9c9bfa6aa626ec2a22540bea28d2ca77964be Mon Sep 17 00:00:00 2001
+From: "Jason R. Coombs" <jaraco@jaraco.com>
+Date: Fri, 4 Nov 2022 13:47:53 -0400
+Subject: [PATCH] Limit the amount of whitespace to search/backtrack. Fixes
+ #3659.
+
+CVE: CVE-2022-40897
+Upstream-Status: Backport [
+Upstream : https://github.com/pypa/setuptools/commit/43a9c9bfa6aa626ec2a22540bea28d2ca77964be
+Import from Ubuntu: http://archive.ubuntu.com/ubuntu/pool/main/s/setuptools/setuptools_45.2.0-1ubuntu0.1.debian.tar.xz
+]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+---
+ setuptools/package_index.py | 2 +-
+ setuptools/tests/test_packageindex.py | 1 -
+ 2 files changed, 1 insertion(+), 2 deletions(-)
+
+--- setuptools-45.2.0.orig/setuptools/package_index.py
++++ setuptools-45.2.0/setuptools/package_index.py
+@@ -215,7 +215,7 @@ def unique_values(func):
+ return wrapper
+
+
+-REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
++REL = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I)
+ # this line is here to fix emacs' cruddy broken syntax highlighting
+
+
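
The one-line change above bounds the whitespace quantifiers in setuptools' REL regex. A rough Python sketch, using a simplified anchor tag rather than a real index page, showing that the bounded pattern still extracts the rel value for ordinary input while capping how much whitespace the engine may backtrack over on malformed pages:

    import re

    # Old (unbounded) and patched (bounded) forms of the package_index REL regex.
    REL_OLD = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
    REL_NEW = re.compile(r"""<([^>]*\srel\s{0,10}=\s{0,10}['"]?([^'" >]+)[^>]*)>""", re.I)

    tag = '<a rel="homepage" href="https://example.org/">'
    assert REL_OLD.search(tag).group(2) == "homepage"
    assert REL_NEW.search(tag).group(2) == "homepage"
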
diff --git a/meta/recipes-devtools/python/python3/CVE-2023-24329.patch b/meta/recipes-devtools/python/python3/CVE-2023-24329.patch
new file mode 100644
index 0000000000..23dec65602
--- /dev/null
+++ b/meta/recipes-devtools/python/python3/CVE-2023-24329.patch
@@ -0,0 +1,80 @@
+From 72d356e3584ebfb8e813a8e9f2cd3dccf233c0d9 Mon Sep 17 00:00:00 2001
+From: "Miss Islington (bot)"
+ <31488909+miss-islington@users.noreply.github.com>
+Date: Sun, 13 Nov 2022 11:00:25 -0800
+Subject: [PATCH] gh-99418: Make urllib.parse.urlparse enforce that a scheme
+ must begin with an alphabetical ASCII character. (GH-99421)
+
+Prevent urllib.parse.urlparse from accepting schemes that don't begin with an alphabetical ASCII character.
+
+RFC 3986 defines a scheme like this: `scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )`
+RFC 2234 defines an ALPHA like this: `ALPHA = %x41-5A / %x61-7A`
+
+The WHATWG URL spec defines a scheme like this:
+`"A URL-scheme string must be one ASCII alpha, followed by zero or more of ASCII alphanumeric, U+002B (+), U+002D (-), and U+002E (.)."`
+(cherry picked from commit 439b9cfaf43080e91c4ad69f312f21fa098befc7)
+
+Co-authored-by: Ben Kallus <49924171+kenballus@users.noreply.github.com>
+
+Upstream-Status: Backport [https://github.com/python/cpython/commit/72d356e3584ebfb8e813a8e9f2cd3dccf233c0d9]
+CVE: CVE-2023-24329
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ Lib/test/test_urlparse.py | 18 ++++++++++++++++++
+ Lib/urllib/parse.py | 2 +-
+ ...22-11-12-15-45-51.gh-issue-99418.FxfAXS.rst | 2 ++
+ 3 files changed, 21 insertions(+), 1 deletion(-)
+ create mode 100644 Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rst
+
+diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
+index 0ad3bf1..e1aa913 100644
+--- a/Lib/test/test_urlparse.py
++++ b/Lib/test/test_urlparse.py
+@@ -735,6 +735,24 @@ class UrlParseTestCase(unittest.TestCase):
+ with self.assertRaises(ValueError):
+ p.port
+
++ def test_attributes_bad_scheme(self):
++ """Check handling of invalid schemes."""
++ for bytes in (False, True):
++ for parse in (urllib.parse.urlsplit, urllib.parse.urlparse):
++ for scheme in (".", "+", "-", "0", "http&", "६http"):
++ with self.subTest(bytes=bytes, parse=parse, scheme=scheme):
++ url = scheme + "://www.example.net"
++ if bytes:
++ if url.isascii():
++ url = url.encode("ascii")
++ else:
++ continue
++ p = parse(url)
++ if bytes:
++ self.assertEqual(p.scheme, b"")
++ else:
++ self.assertEqual(p.scheme, "")
++
+ def test_attributes_without_netloc(self):
+ # This example is straight from RFC 3261. It looks like it
+ # should allow the username, hostname, and port to be filled
+diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
+index 979e6d2..2e7a3e2 100644
+--- a/Lib/urllib/parse.py
++++ b/Lib/urllib/parse.py
+@@ -452,7 +452,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
+ clear_cache()
+ netloc = query = fragment = ''
+ i = url.find(':')
+- if i > 0:
++ if i > 0 and url[0].isascii() and url[0].isalpha():
+ if url[:i] == 'http': # optimize the common case
+ url = url[i+1:]
+ if url[:2] == '//':
+diff --git a/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rst b/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rst
+new file mode 100644
+index 0000000..0a06e7c
+--- /dev/null
++++ b/Misc/NEWS.d/next/Library/2022-11-12-15-45-51.gh-issue-99418.FxfAXS.rst
+@@ -0,0 +1,2 @@
++Fix bug in :func:`urllib.parse.urlparse` that causes URL schemes that begin
++with a digit, a plus sign, or a minus sign to be parsed incorrectly.
+--
+2.25.1
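
With the urlsplit() change above applied, a URL whose scheme does not start with an ASCII letter (or contains an invalid scheme character) is treated as having no scheme, mirroring the test cases the patch adds. A short illustrative check against a patched interpreter (on an unpatched 3.8 the first assertions would fail, since strings like "0" or "+" were previously accepted as schemes):

    from urllib.parse import urlparse, urlsplit

    # Same invalid schemes the new test exercises: either the first character
    # is not an ASCII letter, or a later character is not a valid scheme character.
    for scheme in (".", "+", "-", "0", "http&"):
        url = scheme + "://www.example.net"
        assert urlsplit(url).scheme == ""
        assert urlparse(url).scheme == ""

    assert urlsplit("http://www.example.net").scheme == "http"   # normal case unchanged
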
diff --git a/meta/recipes-devtools/python/python3/python3-manifest.json b/meta/recipes-devtools/python/python3/python3-manifest.json
index 3bcc9b8662..0e87f91dd8 100644
--- a/meta/recipes-devtools/python/python3/python3-manifest.json
+++ b/meta/recipes-devtools/python/python3/python3-manifest.json
@@ -531,7 +531,9 @@
"rdepends": [
"core"
],
- "files": [],
+ "files": [
+ "${libdir}/python${PYTHON_MAJMIN}/distutils/command/wininst-*.exe"
+ ],
"cached": []
},
"distutils": {
diff --git a/meta/recipes-devtools/python/python3_3.8.13.bb b/meta/recipes-devtools/python/python3_3.8.18.bb
index 040bacf97c..9d0f72ecf9 100644
--- a/meta/recipes-devtools/python/python3_3.8.13.bb
+++ b/meta/recipes-devtools/python/python3_3.8.18.bb
@@ -4,7 +4,7 @@ DESCRIPTION = "Python is a programming language that lets you work more quickly
LICENSE = "PSF-2.0 & BSD-0-Clause"
SECTION = "devel/python"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=c84eccf626bb6fde43e6ea5e28d8feb5"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=07fc4b9a9c0c0e48050ed38a5e72552b"
SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://run-ptest \
@@ -34,6 +34,7 @@ SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \
file://0001-python3-Do-not-hardcode-lib-for-distutils.patch \
file://0020-configure.ac-setup.py-do-not-add-a-curses-include-pa.patch \
file://makerace.patch \
+ file://CVE-2023-24329.patch \
"
SRC_URI_append_class-native = " \
@@ -42,8 +43,8 @@ SRC_URI_append_class-native = " \
file://0001-Don-t-search-system-for-headers-libraries.patch \
"
-SRC_URI[md5sum] = "c4b7100dcaace9d33ab1fda9a3a038d6"
-SRC_URI[sha256sum] = "6f309077012040aa39fe8f0c61db8c0fa1c45136763299d375c9e5756f09cf57"
+SRC_URI[md5sum] = "5ea6267ea00513fc31d3746feb35842d"
+SRC_URI[sha256sum] = "3ffb71cd349a326ba7b2fadc7e7df86ba577dd9c4917e52a8401adbda7405e3f"
# exclude pre-releases for both python 2.x and 3.x
UPSTREAM_CHECK_REGEX = "[Pp]ython-(?P<pver>\d+(\.\d+)+).tar"
@@ -60,6 +61,8 @@ CVE_CHECK_WHITELIST += "CVE-2020-15523 CVE-2022-26488"
# The mailcap module is insecure by design, so this can't be fixed in a meaningful way.
# The module will be removed in the future and flaws documented.
CVE_CHECK_WHITELIST += "CVE-2015-20107"
+# Not an issue, in fact expected behaviour
+CVE_CHECK_WHITELIST += "CVE-2023-36632"
PYTHON_MAJMIN = "3.8"
diff --git a/meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb b/meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb
index d83ee59375..5ae6a37f26 100644
--- a/meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb
+++ b/meta/recipes-devtools/qemu/qemu-system-native_4.2.0.bb
@@ -9,7 +9,7 @@ DEPENDS = "glib-2.0-native zlib-native pixman-native qemu-native bison-native"
EXTRA_OECONF_append = " --target-list=${@get_qemu_system_target_list(d)}"
-PACKAGECONFIG ??= "fdt alsa kvm"
+PACKAGECONFIG ??= "fdt alsa kvm slirp"
# Handle distros such as CentOS 5 32-bit that do not have kvm support
PACKAGECONFIG_remove = "${@'kvm' if not os.path.exists('/usr/include/linux/kvm.h') else ''}"
diff --git a/meta/recipes-devtools/qemu/qemu.inc b/meta/recipes-devtools/qemu/qemu.inc
index 25c2cdef3a..59ff69d51d 100644
--- a/meta/recipes-devtools/qemu/qemu.inc
+++ b/meta/recipes-devtools/qemu/qemu.inc
@@ -98,6 +98,52 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \
file://CVE-2020-13253_4.patch \
file://CVE-2020-13253_5.patch \
file://CVE-2020-13791.patch \
+ file://CVE-2022-35414.patch \
+ file://CVE-2020-27821.patch \
+ file://CVE-2020-13754-1.patch \
+ file://CVE-2020-13754-2.patch \
+ file://CVE-2020-13754-3.patch \
+ file://CVE-2020-13754-4.patch \
+ file://CVE-2021-3713.patch \
+ file://CVE-2021-3748.patch \
+ file://CVE-2021-3930.patch \
+ file://CVE-2021-4206.patch \
+ file://CVE-2021-4207.patch \
+ file://CVE-2022-0216-1.patch \
+ file://CVE-2022-0216-2.patch \
+ file://CVE-2021-3750.patch \
+ file://CVE-2021-3638.patch \
+ file://CVE-2021-20196.patch \
+ file://CVE-2021-3507.patch \
+ file://hw-block-nvme-refactor-nvme_addr_read.patch \
+ file://hw-block-nvme-handle-dma-errors.patch \
+ file://CVE-2021-3929.patch \
+ file://CVE-2022-4144.patch \
+ file://CVE-2020-15859.patch \
+ file://CVE-2020-15469-1.patch \
+ file://CVE-2020-15469-2.patch \
+ file://CVE-2020-15469-3.patch \
+ file://CVE-2020-15469-4.patch \
+ file://CVE-2020-15469-5.patch \
+ file://CVE-2020-15469-6.patch \
+ file://CVE-2020-15469-7.patch \
+ file://CVE-2020-15469-8.patch \
+ file://CVE-2020-35504.patch \
+ file://CVE-2020-35505.patch \
+ file://CVE-2022-26354.patch \
+ file://CVE-2021-3409-1.patch \
+ file://CVE-2021-3409-2.patch \
+ file://CVE-2021-3409-3.patch \
+ file://CVE-2021-3409-4.patch \
+ file://CVE-2021-3409-5.patch \
+ file://hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch \
+ file://CVE-2023-0330.patch \
+ file://CVE-2023-3354.patch \
+ file://CVE-2023-3180.patch \
+ file://CVE-2020-24165.patch \
+ file://CVE-2023-5088.patch \
+ file://9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch \
+ file://CVE-2023-2861.patch \
"
UPSTREAM_CHECK_REGEX = "qemu-(?P<pver>\d+(\.\d+)+)\.tar"
@@ -115,6 +161,21 @@ CVE_CHECK_WHITELIST += "CVE-2007-0998"
# https://bugzilla.redhat.com/show_bug.cgi?id=1609015#c11
CVE_CHECK_WHITELIST += "CVE-2018-18438"
+# The issue was introduced in v5.1.0-rc0, so qemu 4.2.0 is not affected.
+CVE_CHECK_WHITELIST += "CVE-2020-27661"
+
+# As per https://nvd.nist.gov/vuln/detail/CVE-2023-0664
+# https://bugzilla.redhat.com/show_bug.cgi?id=2167423
+# this bug is Windows-specific.
+CVE_CHECK_WHITELIST += "CVE-2023-0664"
+
+# As per https://bugzilla.redhat.com/show_bug.cgi?id=2203387
+# RHEL specific issue
+CVE_CHECK_WHITELIST += "CVE-2023-2680"
+
+# Only affects the `qemu-kvm` shipped with the Red Hat Enterprise Linux 8.3 release.
+CVE_CHECK_WHITELIST += "CVE-2021-20295"
+
COMPATIBLE_HOST_mipsarchn32 = "null"
COMPATIBLE_HOST_mipsarchn64 = "null"
@@ -254,6 +315,15 @@ PACKAGECONFIG[xkbcommon] = "--enable-xkbcommon,--disable-xkbcommon,libxkbcommon"
PACKAGECONFIG[libudev] = "--enable-libudev,--disable-libudev,eudev"
PACKAGECONFIG[libxml2] = "--enable-libxml2,--disable-libxml2,libxml2"
PACKAGECONFIG[seccomp] = "--enable-seccomp,--disable-seccomp,libseccomp"
+PACKAGECONFIG[capstone] = "--enable-capstone,--disable-capstone"
+# libnfs is currently provided by meta-kodi
+PACKAGECONFIG[libnfs] = "--enable-libnfs,--disable-libnfs,libnfs"
+PACKAGECONFIG[brlapi] = "--enable-brlapi,--disable-brlapi"
+PACKAGECONFIG[vde] = "--enable-vde,--disable-vde"
+# version 4.2.0 doesn't have an "internal" option for enable-slirp, so use "git" which uses the same configure code path
+PACKAGECONFIG[slirp] = "--enable-slirp=git,--disable-slirp"
+PACKAGECONFIG[rbd] = "--enable-rbd,--disable-rbd"
+PACKAGECONFIG[rdma] = "--enable-rdma,--disable-rdma"
INSANE_SKIP_${PN} = "arch"
diff --git a/meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch b/meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch
index 3a7d7bbd33..3789f1edea 100644
--- a/meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch
+++ b/meta/recipes-devtools/qemu/qemu/0012-fix-libcap-header-issue-on-some-distro.patch
@@ -60,7 +60,7 @@ Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
-index 6f132c5f..8329950c 100644
+index 300c9765..2823db7d 100644
--- a/fsdev/virtfs-proxy-helper.c
+++ b/fsdev/virtfs-proxy-helper.c
@@ -13,7 +13,6 @@
@@ -71,9 +71,9 @@ index 6f132c5f..8329950c 100644
#include <sys/fsuid.h>
#include <sys/vfs.h>
#include <sys/ioctl.h>
-@@ -27,7 +26,11 @@
- #include "9p-iov-marshal.h"
+@@ -28,7 +27,11 @@
#include "hw/9pfs/9p-proxy.h"
+ #include "hw/9pfs/9p-util.h"
#include "fsdev/9p-iov-marshal.h"
-
+/*
@@ -84,3 +84,6 @@ index 6f132c5f..8329950c 100644
#define PROGNAME "virtfs-proxy-helper"
#ifndef XFS_SUPER_MAGIC
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch b/meta/recipes-devtools/qemu/qemu/9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch
new file mode 100644
index 0000000000..72d9c47bde
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/9pfs-local-ignore-O_NOATIME-if-we-don-t-have-permiss.patch
@@ -0,0 +1,63 @@
+From a5804fcf7b22fc7d1f9ec794dd284c7d504bd16b Mon Sep 17 00:00:00 2001
+From: Omar Sandoval <osandov@fb.com>
+Date: Thu, 14 May 2020 08:06:43 +0200
+Subject: [PATCH] 9pfs: local: ignore O_NOATIME if we don't have permissions
+
+QEMU's local 9pfs server passes through O_NOATIME from the client. If
+the QEMU process doesn't have permissions to use O_NOATIME (namely, it
+does not own the file nor have the CAP_FOWNER capability), the open will
+fail. This causes issues when from the client's point of view, it
+believes it has permissions to use O_NOATIME (e.g., a process running as
+root in the virtual machine). Additionally, overlayfs on Linux opens
+files on the lower layer using O_NOATIME, so in this case a 9pfs mount
+can't be used as a lower layer for overlayfs (cf.
+https://github.com/osandov/drgn/blob/dabfe1971951701da13863dbe6d8a1d172ad9650/vmtest/onoatimehack.c
+and https://github.com/NixOS/nixpkgs/issues/54509).
+
+Luckily, O_NOATIME is effectively a hint, and is often ignored by, e.g.,
+network filesystems. open(2) notes that O_NOATIME "may not be effective
+on all filesystems. One example is NFS, where the server maintains the
+access time." This means that we can honor it when possible but fall
+back to ignoring it.
+
+Acked-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Message-Id: <e9bee604e8df528584693a4ec474ded6295ce8ad.1587149256.git.osandov@fb.com>
+Signed-off-by: Greg Kurz <groug@kaod.org>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/a5804fcf7b22fc7d1f9ec794dd284c7d504bd16b]
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/9pfs/9p-util.h | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h
+index 79ed6b233e5..546f46dc7dc 100644
+--- a/hw/9pfs/9p-util.h
++++ b/hw/9pfs/9p-util.h
+@@ -37,9 +37,22 @@ static inline int openat_file(int dirfd, const char *name, int flags,
+ {
+ int fd, serrno, ret;
+
++again:
+ fd = openat(dirfd, name, flags | O_NOFOLLOW | O_NOCTTY | O_NONBLOCK,
+ mode);
+ if (fd == -1) {
++ if (errno == EPERM && (flags & O_NOATIME)) {
++ /*
++ * The client passed O_NOATIME but we lack permissions to honor it.
++ * Rather than failing the open, fall back without O_NOATIME. This
++ * doesn't break the semantics on the client side, as the Linux
++ * open(2) man page notes that O_NOATIME "may not be effective on
++ * all filesystems". In particular, NFS and other network
++ * filesystems ignore it entirely.
++ */
++ flags &= ~O_NOATIME;
++ goto again;
++ }
+ return -1;
+ }
+
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-1.patch
new file mode 100644
index 0000000000..fdfff9d81d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-1.patch
@@ -0,0 +1,91 @@
+From 5d971f9e672507210e77d020d89e0e89165c8fc9 Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Wed, 10 Jun 2020 09:47:49 -0400
+Subject: [PATCH] memory: Revert "memory: accept mismatching sizes in
+ memory_region_access_valid"
+
+Memory API documentation documents valid .min_access_size and .max_access_size
+fields and explains that any access outside these boundaries is blocked.
+
+This is what devices seem to assume.
+
+However this is not what the implementation does: it simply
+ignores the boundaries unless there's an "accepts" callback.
+
+Naturally, this breaks a bunch of devices.
+
+Revert to the documented behaviour.
+
+Devices that want to allow any access can just drop the valid field,
+or add the impl field to have accesses converted to appropriate
+length.
+
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Richard Henderson <rth@twiddle.net>
+Fixes: CVE-2020-13754
+Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1842363
+Fixes: a014ed07bd5a ("memory: accept mismatching sizes in memory_region_access_valid")
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Message-Id: <20200610134731.1514409-1-mst@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+https://git.qemu.org/?p=qemu.git;a=patch;h=5d971f9e672507210e77d020d89e0e89165c8fc9
+CVE: CVE-2020-13754
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ memory.c | 29 +++++++++--------------------
+ 1 file changed, 9 insertions(+), 20 deletions(-)
+
+diff --git a/memory.c b/memory.c
+index 2f15a4b..9200b20 100644
+--- a/memory.c
++++ b/memory.c
+@@ -1352,35 +1352,24 @@ bool memory_region_access_valid(MemoryRegion *mr,
+ bool is_write,
+ MemTxAttrs attrs)
+ {
+- int access_size_min, access_size_max;
+- int access_size, i;
+-
+- if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
++ if (mr->ops->valid.accepts
++ && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
+ return false;
+ }
+
+- if (!mr->ops->valid.accepts) {
+- return true;
+- }
+-
+- access_size_min = mr->ops->valid.min_access_size;
+- if (!mr->ops->valid.min_access_size) {
+- access_size_min = 1;
++ if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
++ return false;
+ }
+
+- access_size_max = mr->ops->valid.max_access_size;
++ /* Treat zero as compatibility all valid */
+ if (!mr->ops->valid.max_access_size) {
+- access_size_max = 4;
++ return true;
+ }
+
+- access_size = MAX(MIN(size, access_size_max), access_size_min);
+- for (i = 0; i < size; i += access_size) {
+- if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
+- is_write, attrs)) {
+- return false;
+- }
++ if (size > mr->ops->valid.max_access_size
++ || size < mr->ops->valid.min_access_size) {
++ return false;
+ }
+-
+ return true;
+ }
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-2.patch
new file mode 100644
index 0000000000..7354edc54d
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-2.patch
@@ -0,0 +1,69 @@
+From dba04c3488c4699f5afe96f66e448b1d447cf3fb Mon Sep 17 00:00:00 2001
+From: Michael Tokarev <mjt@tls.msk.ru>
+Date: Mon, 20 Jul 2020 19:06:27 +0300
+Subject: [PATCH] acpi: accept byte and word access to core ACPI registers
+
+All ISA registers should be accessible as bytes, words or dwords
+(if wide enough). Fix the access constraints for acpi-pm-evt,
+acpi-pm-tmr & acpi-cnt registers.
+
+Fixes: 5d971f9e67 (memory: Revert "memory: accept mismatching sizes in memory_region_access_valid")
+Fixes: afafe4bbe0 (apci: switch cnt to memory api)
+Fixes: 77d58b1e47 (apci: switch timer to memory api)
+Fixes: b5a7c024d2 (apci: switch evt to memory api)
+Buglink: https://lore.kernel.org/xen-devel/20200630170913.123646-1-anthony.perard@citrix.com/T/
+Buglink: https://bugs.debian.org/964793
+BugLink: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=964247
+BugLink: https://bugs.launchpad.net/bugs/1886318
+Reported-By: Simon John <git@the-jedi.co.uk>
+Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
+Message-Id: <20200720160627.15491-1-mjt@msgid.tls.msk.ru>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+https://git.qemu.org/?p=qemu.git;a=patch;h=dba04c3488c4699f5afe96f66e448b1d447cf3fb
+CVE: CVE-2020-13754
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/acpi/core.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/hw/acpi/core.c b/hw/acpi/core.c
+index f6d9ec4..ac06db3 100644
+--- a/hw/acpi/core.c
++++ b/hw/acpi/core.c
+@@ -458,7 +458,8 @@ static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
+ static const MemoryRegionOps acpi_pm_evt_ops = {
+ .read = acpi_pm_evt_read,
+ .write = acpi_pm_evt_write,
+- .valid.min_access_size = 2,
++ .impl.min_access_size = 2,
++ .valid.min_access_size = 1,
+ .valid.max_access_size = 2,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+@@ -527,7 +528,8 @@ static void acpi_pm_tmr_write(void *opaque, hwaddr addr, uint64_t val,
+ static const MemoryRegionOps acpi_pm_tmr_ops = {
+ .read = acpi_pm_tmr_read,
+ .write = acpi_pm_tmr_write,
+- .valid.min_access_size = 4,
++ .impl.min_access_size = 4,
++ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+@@ -599,7 +601,8 @@ static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val,
+ static const MemoryRegionOps acpi_pm_cnt_ops = {
+ .read = acpi_pm_cnt_read,
+ .write = acpi_pm_cnt_write,
+- .valid.min_access_size = 2,
++ .impl.min_access_size = 2,
++ .valid.min_access_size = 1,
+ .valid.max_access_size = 2,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-3.patch
new file mode 100644
index 0000000000..2a8781050f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-3.patch
@@ -0,0 +1,65 @@
+From 8e67fda2dd6202ccec093fda561107ba14830a17 Mon Sep 17 00:00:00 2001
+From: Laurent Vivier <lvivier@redhat.com>
+Date: Tue, 21 Jul 2020 10:33:22 +0200
+Subject: [PATCH] xhci: fix valid.max_access_size to access address registers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+QEMU XHCI advertises AC64 (64-bit addressing) but doesn't allow
+64-bit mode access in "runtime" and "operational" MemoryRegionOps.
+
+Set the max_access_size based on sizeof(dma_addr_t) as AC64 is set.
+
+XHCI specs:
+"If the xHC supports 64-bit addressing (AC64 = â1â), then software
+should write 64-bit registers using only Qword accesses. If a
+system is incapable of issuing Qword accesses, then writes to the
+64-bit address fields shall be performed using 2 Dword accesses;
+low Dword-first, high-Dword second. If the xHC supports 32-bit
+addressing (AC64 = ‘0’), then the high Dword of registers containing
+64-bit address fields are unused and software should write addresses
+using only Dword accesses"
+
+The problem has been detected with SLOF, as linux kernel always accesses
+registers using 32-bit access even if AC64 is set and revealed by
+5d971f9e6725 ("memory: Revert "memory: accept mismatching sizes in memory_region_access_valid"")
+
+Suggested-by: Alexey Kardashevskiy <aik@au1.ibm.com>
+Signed-off-by: Laurent Vivier <lvivier@redhat.com>
+Message-id: 20200721083322.90651-1-lvivier@redhat.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+https://git.qemu.org/?p=qemu.git;a=patch;h=8e67fda2dd6202ccec093fda561107ba14830a17
+CVE: CVE-2020-13754
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/usb/hcd-xhci.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
+index b330e36..67a18fe 100644
+--- a/hw/usb/hcd-xhci.c
++++ b/hw/usb/hcd-xhci.c
+@@ -3184,7 +3184,7 @@ static const MemoryRegionOps xhci_oper_ops = {
+ .read = xhci_oper_read,
+ .write = xhci_oper_write,
+ .valid.min_access_size = 4,
+- .valid.max_access_size = 4,
++ .valid.max_access_size = sizeof(dma_addr_t),
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
+@@ -3200,7 +3200,7 @@ static const MemoryRegionOps xhci_runtime_ops = {
+ .read = xhci_runtime_read,
+ .write = xhci_runtime_write,
+ .valid.min_access_size = 4,
+- .valid.max_access_size = 4,
++ .valid.max_access_size = sizeof(dma_addr_t),
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-4.patch
new file mode 100644
index 0000000000..6bad07d03f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-13754-4.patch
@@ -0,0 +1,39 @@
+From 70b78d4e71494c90d2ccb40381336bc9b9a22f79 Mon Sep 17 00:00:00 2001
+From: Alistair Francis <alistair.francis@wdc.com>
+Date: Tue, 30 Jun 2020 13:12:11 -0700
+Subject: [PATCH] hw/riscv: Allow 64 bit access to SiFive CLINT
+
+Commit 5d971f9e672507210e77d020d89e0e89165c8fc9
+"memory: Revert "memory: accept mismatching sizes in
+memory_region_access_valid"" broke most RISC-V boards as they do 64 bit
+accesses to the CLINT and QEMU would trigger a fault. Fix this failure
+by allowing 8 byte accesses.
+
+Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
+Reviewed-by: LIU Zhiwei<zhiwei_liu@c-sky.com>
+Message-Id: <122b78825b077e4dfd39b444d3a46fe894a7804c.1593547870.git.alistair.francis@wdc.com>
+
+https://git.qemu.org/?p=qemu.git;a=patch;h=70b78d4e71494c90d2ccb40381336bc9b9a22f79
+CVE: CVE-2020-13754
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/riscv/sifive_clint.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/riscv/sifive_clint.c b/hw/riscv/sifive_clint.c
+index b11ffa0..669c21a 100644
+--- a/hw/riscv/sifive_clint.c
++++ b/hw/riscv/sifive_clint.c
+@@ -181,7 +181,7 @@ static const MemoryRegionOps sifive_clint_ops = {
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+- .max_access_size = 4
++ .max_access_size = 8
+ }
+ };
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-1.patch
new file mode 100644
index 0000000000..20f39f0a26
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-1.patch
@@ -0,0 +1,50 @@
+From 520f26fc6d17b71a43eaf620e834b3bdf316f3d3 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:25 +0530
+Subject: [PATCH] hw/pci-host: add pci-intack write method
+
+Add pci-intack mmio write method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-2-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu
+https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/qemu/qemu/commit/520f26fc6d17b71a43eaf620e834b3bdf316f3d3 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/pci-host/prep.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/hw/pci-host/prep.c
++++ b/hw/pci-host/prep.c
+@@ -26,6 +26,7 @@
+ #include "qemu/osdep.h"
+ #include "qemu-common.h"
+ #include "qemu/units.h"
++#include "qemu/log.h"
+ #include "qapi/error.h"
+ #include "hw/pci/pci.h"
+ #include "hw/pci/pci_bus.h"
+@@ -119,8 +120,15 @@ static uint64_t raven_intack_read(void *
+ return pic_read_irq(isa_pic);
+ }
+
++static void raven_intack_write(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__);
++}
++
+ static const MemoryRegionOps raven_intack_ops = {
+ .read = raven_intack_read,
++ .write = raven_intack_write,
+ .valid = {
+ .max_access_size = 1,
+ },
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-2.patch
new file mode 100644
index 0000000000..d6715d337c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-2.patch
@@ -0,0 +1,69 @@
+From 4f2a5202a05fc1612954804a2482f07bff105ea2 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:26 +0530
+Subject: [PATCH] pci-host: designware: add pcie-msi read method
+
+Add pcie-msi mmio read method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-3-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-2.patch?h=ubuntu/focal-security Upstream Commit https://github.com/qemu/qemu/commit/4f2a5202a05fc1612954804a2482f07bff105ea2]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/pci-host/designware.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c
+index f9fb97a..bde3a34 100644
+--- a/hw/pci-host/designware.c
++++ b/hw/pci-host/designware.c
+@@ -21,6 +21,7 @@
+ #include "qemu/osdep.h"
+ #include "qapi/error.h"
+ #include "qemu/module.h"
++#include "qemu/log.h"
+ #include "hw/pci/msi.h"
+ #include "hw/pci/pci_bridge.h"
+ #include "hw/pci/pci_host.h"
+@@ -63,6 +64,23 @@ designware_pcie_root_to_host(DesignwarePCIERoot *root)
+ return DESIGNWARE_PCIE_HOST(bus->parent);
+ }
+
++static uint64_t designware_pcie_root_msi_read(void *opaque, hwaddr addr,
++ unsigned size)
++{
++ /*
++ * Attempts to read from the MSI address are undefined in
++ * the PCI specifications. For this hardware, the datasheet
++ * specifies that a read from the magic address is simply not
++ * intercepted by the MSI controller, and will go out to the
++ * AHB/AXI bus like any other PCI-device-initiated DMA read.
++ * This is not trivial to implement in QEMU, so since
++ * well-behaved guests won't ever ask a PCI device to DMA from
++ * this address we just log the missing functionality.
++ */
++ qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__);
++ return 0;
++}
++
+ static void designware_pcie_root_msi_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned len)
+ {
+@@ -77,6 +95,7 @@ static void designware_pcie_root_msi_write(void *opaque, hwaddr addr,
+ }
+
+ static const MemoryRegionOps designware_pci_host_msi_ops = {
++ .read = designware_pcie_root_msi_read,
+ .write = designware_pcie_root_msi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-3.patch
new file mode 100644
index 0000000000..85abe8ff32
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-3.patch
@@ -0,0 +1,49 @@
+From 24202d2b561c3b4c48bd28383c8c34b4ac66c2bf Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:27 +0530
+Subject: [PATCH] vfio: add quirk device write method
+
+Add vfio quirk device mmio write method to avoid NULL pointer
+dereference issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Acked-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-4-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-3.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/24202d2b561c3b4c48bd28383c8c34b4ac66c2bf]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/vfio/pci-quirks.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/hw/vfio/pci-quirks.c
++++ b/hw/vfio/pci-quirks.c
+@@ -13,6 +13,7 @@
+ #include "qemu/osdep.h"
+ #include "exec/memop.h"
+ #include "qemu/units.h"
++#include "qemu/log.h"
+ #include "qemu/error-report.h"
+ #include "qemu/main-loop.h"
+ #include "qemu/module.h"
+@@ -278,8 +279,15 @@ static uint64_t vfio_ati_3c3_quirk_read(
+ return data;
+ }
+
++static void vfio_ati_3c3_quirk_write(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
++}
++
+ static const MemoryRegionOps vfio_ati_3c3_quirk = {
+ .read = vfio_ati_3c3_quirk_read,
++ .write = vfio_ati_3c3_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-4.patch
new file mode 100644
index 0000000000..52fac8a051
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-4.patch
@@ -0,0 +1,53 @@
+From f867cebaedbc9c43189f102e4cdfdff05e88df7f Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:28 +0530
+Subject: [PATCH] prep: add ppc-parity write method
+
+Add ppc-parity mmio write method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Acked-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Message-Id: <20200811114133.672647-5-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-4.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/f867cebaedbc9c43189f102e4cdfdff05e88df7f]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/ppc/prep_systemio.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c
+index 4e48ef2..b2bd783 100644
+--- a/hw/ppc/prep_systemio.c
++++ b/hw/ppc/prep_systemio.c
+@@ -23,6 +23,7 @@
+ */
+
+ #include "qemu/osdep.h"
++#include "qemu/log.h"
+ #include "hw/irq.h"
+ #include "hw/isa/isa.h"
+ #include "hw/qdev-properties.h"
+@@ -235,8 +236,15 @@ static uint64_t ppc_parity_error_readl(void *opaque, hwaddr addr,
+ return val;
+ }
+
++static void ppc_parity_error_writel(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
++}
++
+ static const MemoryRegionOps ppc_parity_error_ops = {
+ .read = ppc_parity_error_readl,
++ .write = ppc_parity_error_writel,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-5.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-5.patch
new file mode 100644
index 0000000000..49c6c5e3e2
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-5.patch
@@ -0,0 +1,53 @@
+From b5bf601f364e1a14ca4c3276f88dfec024acf613 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:29 +0530
+Subject: [PATCH] nvram: add nrf51_soc flash read method
+
+Add nrf51_soc mmio read method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Message-Id: <20200811114133.672647-6-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-5.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/b5bf601f364e1a14ca4c3276f88dfec024acf613 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/nvram/nrf51_nvm.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/hw/nvram/nrf51_nvm.c b/hw/nvram/nrf51_nvm.c
+index f2283c1..7b3460d 100644
+--- a/hw/nvram/nrf51_nvm.c
++++ b/hw/nvram/nrf51_nvm.c
+@@ -273,6 +273,15 @@ static const MemoryRegionOps io_ops = {
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
++static uint64_t flash_read(void *opaque, hwaddr offset, unsigned size)
++{
++ /*
++ * This is a rom_device MemoryRegion which is always in
++ * romd_mode (we never put it in MMIO mode), so reads always
++ * go directly to RAM and never come here.
++ */
++ g_assert_not_reached();
++}
+
+ static void flash_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned int size)
+@@ -300,6 +309,7 @@ static void flash_write(void *opaque, hwaddr offset, uint64_t value,
+
+
+ static const MemoryRegionOps flash_ops = {
++ .read = flash_read,
+ .write = flash_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-6.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-6.patch
new file mode 100644
index 0000000000..115be68295
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-6.patch
@@ -0,0 +1,61 @@
+Backport of:
+
+From 921604e175b8ec06c39503310e7b3ec1e3eafe9e Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:30 +0530
+Subject: [PATCH] spapr_pci: add spapr msi read method
+
+Add spapr msi mmio read method to avoid NULL pointer dereference
+issue.
+
+Reported-by: Lei Sun <slei.casper@gmail.com>
+Acked-by: David Gibson <david@gibson.dropbear.id.au>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-7-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-6.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/921604e175b8ec06c39503310e7b3ec1e3eafe9e]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/ppc/spapr_pci.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/hw/ppc/spapr_pci.c
++++ b/hw/ppc/spapr_pci.c
+@@ -52,6 +52,7 @@
+ #include "sysemu/kvm.h"
+ #include "sysemu/hostmem.h"
+ #include "sysemu/numa.h"
++#include "qemu/log.h"
+
+ /* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
+ #define RTAS_QUERY_FN 0
+@@ -738,6 +739,12 @@ static PCIINTxRoute spapr_route_intx_pin
+ return route;
+ }
+
++static uint64_t spapr_msi_read(void *opaque, hwaddr addr, unsigned size)
++{
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__);
++ return 0;
++}
++
+ /*
+ * MSI/MSIX memory region implementation.
+ * The handler handles both MSI and MSIX.
+@@ -755,8 +762,11 @@ static void spapr_msi_write(void *opaque
+ }
+
+ static const MemoryRegionOps spapr_msi_ops = {
+- /* There is no .read as the read result is undefined by PCI spec */
+- .read = NULL,
++ /*
++ * .read result is undefined by PCI spec.
++ * define .read method to avoid assert failure in memory_region_init_io
++ */
++ .read = spapr_msi_read,
+ .write = spapr_msi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN
+ };
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-7.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-7.patch
new file mode 100644
index 0000000000..7d8ec32251
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-7.patch
@@ -0,0 +1,50 @@
+From 2c9fb3b784000c1df32231e1c2464bb2e3fc4620 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:31 +0530
+Subject: [PATCH] tz-ppc: add dummy read/write methods
+
+Add tz-ppc-dummy mmio read/write methods to avoid assert failure
+during initialisation.
+
+Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Message-Id: <20200811114133.672647-8-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-7.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/2c9fb3b784000c1df32231e1c2464bb2e3fc4620 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/misc/tz-ppc.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/hw/misc/tz-ppc.c b/hw/misc/tz-ppc.c
+index 6431257..36495c6 100644
+--- a/hw/misc/tz-ppc.c
++++ b/hw/misc/tz-ppc.c
+@@ -196,7 +196,21 @@ static bool tz_ppc_dummy_accepts(void *opaque, hwaddr addr,
+ g_assert_not_reached();
+ }
+
++static uint64_t tz_ppc_dummy_read(void *opaque, hwaddr addr, unsigned size)
++{
++ g_assert_not_reached();
++}
++
++static void tz_ppc_dummy_write(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ g_assert_not_reached();
++}
++
+ static const MemoryRegionOps tz_ppc_dummy_ops = {
++ /* define r/w methods to avoid assert failure in memory_region_init_io */
++ .read = tz_ppc_dummy_read,
++ .write = tz_ppc_dummy_write,
+ .valid.accepts = tz_ppc_dummy_accepts,
+ };
+
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-8.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-8.patch
new file mode 100644
index 0000000000..7857ba266e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15469-8.patch
@@ -0,0 +1,44 @@
+From 735754aaa15a6ed46db51fd731e88331c446ea54 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit <pjp@fedoraproject.org>
+Date: Tue, 11 Aug 2020 17:11:32 +0530
+Subject: [PATCH] imx7-ccm: add digprog mmio write method
+
+Add digprog mmio write method to avoid assert failure during
+initialisation.
+
+Reviewed-by: Li Qiang <liq3ea@gmail.com>
+Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org>
+Message-Id: <20200811114133.672647-9-ppandit@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+CVE: CVE-2020-15469
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-15469-8.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/735754aaa15a6ed46db51fd731e88331c446ea54]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/misc/imx7_ccm.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c
+index 02fc1ae..075159e 100644
+--- a/hw/misc/imx7_ccm.c
++++ b/hw/misc/imx7_ccm.c
+@@ -131,8 +131,16 @@ static const struct MemoryRegionOps imx7_set_clr_tog_ops = {
+ },
+ };
+
++static void imx7_digprog_write(void *opaque, hwaddr addr,
++ uint64_t data, unsigned size)
++{
++ qemu_log_mask(LOG_GUEST_ERROR,
++ "Guest write to read-only ANALOG_DIGPROG register\n");
++}
++
+ static const struct MemoryRegionOps imx7_digprog_ops = {
+ .read = imx7_set_clr_tog_read,
++ .write = imx7_digprog_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-15859.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-15859.patch
new file mode 100644
index 0000000000..0f43adeea8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-15859.patch
@@ -0,0 +1,39 @@
+From 22dc8663d9fc7baa22100544c600b6285a63c7a3 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 22 Jul 2020 16:57:46 +0800
+Subject: [PATCH] net: forbid the reentrant RX
+
+The memory API allows DMA into NIC's MMIO area. This means the NIC's
+RX routine must be reentrant. Instead of auditing all the NIC, we can
+simply detect the reentrancy and return early. The queue->delivering
+is set and cleared by qemu_net_queue_deliver() for other queue helpers
+to know whether the delivering in on going (NIC's receive is being
+called). We can check it and return early in qemu_net_queue_flush() to
+forbid reentrant RX.
+
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+CVE: CVE-2020-15859
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/ubuntu/CVE-2020-15859.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/22dc8663d9fc7baa22100544c600b6285a63c7a3 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ net/queue.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/queue.c b/net/queue.c
+index 0164727..19e32c8 100644
+--- a/net/queue.c
++++ b/net/queue.c
+@@ -250,6 +250,9 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
+
+ bool qemu_net_queue_flush(NetQueue *queue)
+ {
++ if (queue->delivering)
++ return false;
++
+ while (!QTAILQ_EMPTY(&queue->packets)) {
+ NetPacket *packet;
+ int ret;
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch
new file mode 100644
index 0000000000..e0a27331a8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-24165.patch
@@ -0,0 +1,94 @@
+CVE: CVE-2020-24165
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/886cc68943ebe8cf7e5f970be33459f95068a441 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 886cc68943ebe8cf7e5f970be33459f95068a441 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Alex=20Benn=C3=A9e?= <alex.bennee@linaro.org>
+Date: Fri, 14 Feb 2020 14:49:52 +0000
+Subject: [PATCH] accel/tcg: fix race in cpu_exec_step_atomic (bug 1863025)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The bug describes a race whereby cpu_exec_step_atomic can acquire a TB
+which is invalidated by a tb_flush before we execute it. This doesn't
+affect the other cpu_exec modes as a tb_flush by it's nature can only
+occur on a quiescent system. The race was described as:
+
+ B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
+ B3. tcg_tb_alloc obtains a new TB
+
+ C3. TB obtained with tb_lookup__cpu_state or tb_gen_code
+ (same TB as B2)
+
+ A3. start_exclusive critical section entered
+ A4. do_tb_flush is called, TB memory freed/re-allocated
+ A5. end_exclusive exits critical section
+
+ B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
+ B3. tcg_tb_alloc reallocates TB from B2
+
+ C4. start_exclusive critical section entered
+ C5. cpu_tb_exec executes the TB code that was free in A4
+
+The simplest fix is to widen the exclusive period to include the TB
+lookup. As a result we can drop the complication of checking we are in
+the exclusive region before we end it.
+
+Cc: Yifan <me@yifanlu.com>
+Buglink: https://bugs.launchpad.net/qemu/+bug/1863025
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
+Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
+Message-Id: <20200214144952.15502-1-alex.bennee@linaro.org>
+Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
+---
+ accel/tcg/cpu-exec.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
+index 2560c90eec79..d95c4848a47b 100644
+--- a/accel/tcg/cpu-exec.c
++++ b/accel/tcg/cpu-exec.c
+@@ -240,6 +240,8 @@ void cpu_exec_step_atomic(CPUState *cpu)
+ uint32_t cf_mask = cflags & CF_HASH_MASK;
+
+ if (sigsetjmp(cpu->jmp_env, 0) == 0) {
++ start_exclusive();
++
+ tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+ if (tb == NULL) {
+ mmap_lock();
+@@ -247,8 +249,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
+ mmap_unlock();
+ }
+
+- start_exclusive();
+-
+ /* Since we got here, we know that parallel_cpus must be true. */
+ parallel_cpus = false;
+ cc->cpu_exec_enter(cpu);
+@@ -271,14 +271,15 @@ void cpu_exec_step_atomic(CPUState *cpu)
+ qemu_plugin_disable_mem_helpers(cpu);
+ }
+
+- if (cpu_in_exclusive_context(cpu)) {
+- /* We might longjump out of either the codegen or the
+- * execution, so must make sure we only end the exclusive
+- * region if we started it.
+- */
+- parallel_cpus = true;
+- end_exclusive();
+- }
++
++ /*
++ * As we start the exclusive region before codegen we must still
++ * be in the region if we longjump out of either the codegen or
++ * the execution.
++ */
++ g_assert(cpu_in_exclusive_context(cpu));
++ parallel_cpus = true;
++ end_exclusive();
+ }
+
+ struct tb_desc {
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-27821.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-27821.patch
new file mode 100644
index 0000000000..e26bc31bbb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-27821.patch
@@ -0,0 +1,73 @@
+From 15222d4636d742f3395fd211fad0cd7e36d9f43e Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 16 Aug 2022 10:07:01 +0530
+Subject: [PATCH] CVE-2020-27821
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=4bfb024bc76973d40a359476dc0291f46e435442]
+CVE: CVE-2020-27821
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+memory: clamp cached translation in case it points to an MMIO region
+
+In using the address_space_translate_internal API, address_space_cache_init
+forgot one piece of advice that can be found in the code for
+address_space_translate_internal:
+
+ /* MMIO registers can be expected to perform full-width accesses based only
+ * on their address, without considering adjacent registers that could
+ * decode to completely different MemoryRegions. When such registers
+ * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
+ * regions overlap wildly. For this reason we cannot clamp the accesses
+ * here.
+ *
+ * If the length is small (as is the case for address_space_ldl/stl),
+ * everything works fine. If the incoming length is large, however,
+ * the caller really has to do the clamping through memory_access_size.
+ */
+
+address_space_cache_init is exactly one such case where "the incoming length
+is large", therefore we need to clamp the resulting length---not to
+memory_access_size though, since we are not doing an access yet, but to
+the size of the resulting section. This ensures that subsequent accesses
+to the cached MemoryRegionSection will be in range.
+
+With this patch, the enclosed testcase notices that the used ring does
+not fit into the MSI-X table and prints a "qemu-system-x86_64: Cannot map used"
+error.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+---
+ exec.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/exec.c b/exec.c
+index 2d6add46..1360051a 100644
+--- a/exec.c
++++ b/exec.c
+@@ -3632,6 +3632,7 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
+ AddressSpaceDispatch *d;
+ hwaddr l;
+ MemoryRegion *mr;
++ Int128 diff;
+
+ assert(len > 0);
+
+@@ -3640,6 +3641,15 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
+ d = flatview_to_dispatch(cache->fv);
+ cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true);
+
++ /*
++ * cache->xlat is now relative to cache->mrs.mr, not to the section itself.
++ * Take that into account to compute how many bytes are there between
++ * cache->xlat and the end of the section.
++ */
++ diff = int128_sub(cache->mrs.size,
++ int128_make64(cache->xlat - cache->mrs.offset_within_region));
++ l = int128_get64(int128_min(diff, int128_make64(l)));
++
+ mr = cache->mrs.mr;
+ memory_region_ref(mr);
+ if (memory_access_is_direct(mr, is_write)) {
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-35504.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-35504.patch
new file mode 100644
index 0000000000..97d32589d8
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-35504.patch
@@ -0,0 +1,51 @@
+Backport of:
+
+From 0db895361b8a82e1114372ff9f4857abea605701 Mon Sep 17 00:00:00 2001
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Date: Wed, 7 Apr 2021 20:57:50 +0100
+Subject: [PATCH] esp: always check current_req is not NULL before use in DMA
+ callbacks
+
+After issuing a SCSI command the SCSI layer can call the SCSIBusInfo .cancel
+callback which resets both current_req and current_dev to NULL. If any data
+is left in the transfer buffer (async_len != 0) then the next TI (Transfer
+Information) command will attempt to reference the NULL pointer causing a
+segfault.
+
+Buglink: https://bugs.launchpad.net/qemu/+bug/1910723
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909247
+Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20210407195801.685-2-mark.cave-ayland@ilande.co.uk>
+
+CVE: CVE-2020-35504
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-35504.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/0db895361b8a82e1114372ff9f4857abea605701 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/esp.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+--- a/hw/scsi/esp.c
++++ b/hw/scsi/esp.c
+@@ -362,6 +362,11 @@ static void do_dma_pdma_cb(ESPState *s)
+ do_cmd(s, s->cmdbuf);
+ return;
+ }
++
++ if (!s->current_req) {
++ return;
++ }
++
+ s->dma_left -= len;
+ s->async_buf += len;
+ s->async_len -= len;
+@@ -415,6 +420,9 @@ static void esp_do_dma(ESPState *s)
+ do_cmd(s, s->cmdbuf);
+ return;
+ }
++ if (!s->current_req) {
++ return;
++ }
+ if (s->async_len == 0) {
+ /* Defer until data is available. */
+ return;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2020-35505.patch b/meta/recipes-devtools/qemu/qemu/CVE-2020-35505.patch
new file mode 100644
index 0000000000..40c0b1e74f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2020-35505.patch
@@ -0,0 +1,45 @@
+Backport of:
+
+From 99545751734035b76bd372c4e7215bb337428d89 Mon Sep 17 00:00:00 2001
+From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Date: Wed, 7 Apr 2021 20:57:55 +0100
+Subject: [PATCH] esp: ensure cmdfifo is not empty and current_dev is non-NULL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+When about to execute a SCSI command, ensure that cmdfifo is not empty and
+current_dev is non-NULL. This can happen if the guest tries to execute a TI
+(Transfer Information) command without issuing one of the select commands
+first.
+
+Buglink: https://bugs.launchpad.net/qemu/+bug/1910723
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909247
+Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20210407195801.685-7-mark.cave-ayland@ilande.co.uk>
+
+CVE: CVE-2020-35505
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2020-35505.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/99545751734035b76bd372c4e7215bb337428d89 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+Signed-off-by: Emily Vekariya <emily.vekariya@einfochips.com>
+---
+ hw/scsi/esp.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
+index c7d701bf..c2a67bc8 100644
+--- a/hw/scsi/esp.c
++++ b/hw/scsi/esp.c
+@@ -193,6 +193,10 @@ static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
+
+ trace_esp_do_busid_cmd(busid);
+ lun = busid & 7;
++
++ if (!s->current_dev) {
++ return;
++ }
+ current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
+ s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
+ datalen = scsi_req_enqueue(s->current_req);
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-20196.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-20196.patch
new file mode 100644
index 0000000000..e9b815740f
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-20196.patch
@@ -0,0 +1,62 @@
+From 94608c59045791dfd35102bc59b792e96f2cfa30 Mon Sep 17 00:00:00 2001
+From: Vivek Kumbhar <vkumbhar@mvista.com>
+Date: Tue, 29 Nov 2022 15:57:13 +0530
+Subject: [PATCH] CVE-2021-20196
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/1ab95af033a419e7a64e2d58e67dd96b20af5233]
+CVE: CVE-2021-20196
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+hw/block/fdc: Kludge missing floppy drive to fix CVE-2021-20196
+
+Guest might select another drive on the bus by setting the
+DRIVE_SEL bit of the DIGITAL OUTPUT REGISTER (DOR).
+The current controller model doesn't expect a BlockBackend
+to be NULL. A simple way to fix CVE-2021-20196 is to create
+an empty BlockBackend when it is missing. All further
+accesses will be safely handled, and the controller state
+machines keep behaving correctly.
+---
+ hw/block/fdc.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/hw/block/fdc.c b/hw/block/fdc.c
+index ac5d31e8..e128e975 100644
+--- a/hw/block/fdc.c
++++ b/hw/block/fdc.c
+@@ -58,6 +58,11 @@
+ } \
+ } while (0)
+
++/* Anonymous BlockBackend for empty drive */
++static BlockBackend *blk_create_empty_drive(void)
++{
++ return blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
++}
+
+ /********************************************************/
+ /* qdev floppy bus */
+@@ -1356,7 +1361,19 @@ static FDrive *get_drv(FDCtrl *fdctrl, int unit)
+
+ static FDrive *get_cur_drv(FDCtrl *fdctrl)
+ {
+- return get_drv(fdctrl, fdctrl->cur_drv);
++ FDrive *cur_drv = get_drv(fdctrl, fdctrl->cur_drv);
++
++ if (!cur_drv->blk) {
++ /*
++ * Kludge: empty drive line selected. Create an anonymous
++ * BlockBackend to avoid NULL deref with various BlockBackend
++ * API calls within this model (CVE-2021-20196).
++ * Due to the controller QOM model limitations, we don't
++ * attach the created BlockBackend to the controller device.
++ */
++ cur_drv->blk = blk_create_empty_drive();
++ }
++ return cur_drv;
+ }
+
+ /* Status A register : 0x00 (read-only) */
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-1.patch
new file mode 100644
index 0000000000..d53383247e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-1.patch
@@ -0,0 +1,85 @@
+From b263d8f928001b5cfa2a993ea43b7a5b3a1811e8 Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:35 +0800
+Subject: [PATCH] hw/sd: sdhci: Don't transfer any data when command time out
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+At the end of sdhci_send_command(), it starts a data transfer if the
+command register indicates data is associated. But the data transfer
+should only be initiated when the command execution has succeeded.
+
+With this fix, the following reproducer:
+
+outl 0xcf8 0x80001810
+outl 0xcfc 0xe1068000
+outl 0xcf8 0x80001804
+outw 0xcfc 0x7
+write 0xe106802c 0x1 0x0f
+write 0xe1068004 0xc 0x2801d10101fffffbff28a384
+write 0xe106800c 0x1f 0x9dacbbcad9e8f7061524334251606f7e8d9cabbac9d8e7f60514233241505f
+write 0xe1068003 0x28 0x80d000251480d000252280d000253080d000253e80d000254c80d000255a80d000256880d0002576
+write 0xe1068003 0x1 0xfe
+
+cannot be reproduced with the following QEMU command line:
+
+$ qemu-system-x86_64 -nographic -M pc-q35-5.0 \
+ -device sdhci-pci,sd-spec-version=3 \
+ -drive if=sd,index=0,file=null-co://,format=raw,id=mydrive \
+ -device sd-card,drive=mydrive \
+ -monitor none -serial none -qtest stdio
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2020-17380
+Fixes: CVE-2020-25085
+Fixes: CVE-2021-3409
+Fixes: d7dfca0807a0 ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cornelius Aschermann (Ruhr-Universität Bochum)
+Reported-by: Sergej Schumilo (Ruhr-Universität Bochum)
+Reported-by: Simon Wörner (Ruhr-Universität Bochum)
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909418
+Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1928146
+Acked-by: Alistair Francis <alistair.francis@wdc.com>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-2-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-1.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/b263d8f928001b5cfa2a993ea43b7a5b3a1811e8 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -316,6 +316,7 @@ static void sdhci_send_command(SDHCIStat
+ SDRequest request;
+ uint8_t response[16];
+ int rlen;
++ bool timeout = false;
+
+ s->errintsts = 0;
+ s->acmd12errsts = 0;
+@@ -339,6 +340,7 @@ static void sdhci_send_command(SDHCIStat
+ trace_sdhci_response16(s->rspreg[3], s->rspreg[2],
+ s->rspreg[1], s->rspreg[0]);
+ } else {
++ timeout = true;
+ trace_sdhci_error("timeout waiting for command response");
+ if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) {
+ s->errintsts |= SDHC_EIS_CMDTIMEOUT;
+@@ -359,7 +361,7 @@ static void sdhci_send_command(SDHCIStat
+
+ sdhci_update_irq(s);
+
+- if (s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
++ if (!timeout && s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
+ s->data_count = 0;
+ sdhci_data_transfer(s);
+ }
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-2.patch
new file mode 100644
index 0000000000..dc00f76ec9
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-2.patch
@@ -0,0 +1,103 @@
+From 8be45cc947832b3c02144c9d52921f499f2d77fe Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:36 +0800
+Subject: [PATCH] hw/sd: sdhci: Don't write to SDHC_SYSAD register when
+ transfer is in progress
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+Per "SD Host Controller Standard Specification Version 7.00"
+chapter 2.2.1 SDMA System Address Register:
+
+This register can be accessed only if no transaction is executing
+(i.e., after a transaction has stopped).
+
+With this fix, the following reproducer:
+
+outl 0xcf8 0x80001010
+outl 0xcfc 0xfbefff00
+outl 0xcf8 0x80001001
+outl 0xcfc 0x06000000
+write 0xfbefff2c 0x1 0x05
+write 0xfbefff0f 0x1 0x37
+write 0xfbefff0a 0x1 0x01
+write 0xfbefff0f 0x1 0x29
+write 0xfbefff0f 0x1 0x02
+write 0xfbefff0f 0x1 0x03
+write 0xfbefff04 0x1 0x01
+write 0xfbefff05 0x1 0x01
+write 0xfbefff07 0x1 0x02
+write 0xfbefff0c 0x1 0x33
+write 0xfbefff0e 0x1 0x20
+write 0xfbefff0f 0x1 0x00
+write 0xfbefff2a 0x1 0x01
+write 0xfbefff0c 0x1 0x00
+write 0xfbefff03 0x1 0x00
+write 0xfbefff05 0x1 0x00
+write 0xfbefff2a 0x1 0x02
+write 0xfbefff0c 0x1 0x32
+write 0xfbefff01 0x1 0x01
+write 0xfbefff02 0x1 0x01
+write 0xfbefff03 0x1 0x01
+
+cannot be reproduced with the following QEMU command line:
+
+$ qemu-system-x86_64 -nographic -machine accel=qtest -m 512M \
+ -nodefaults -device sdhci-pci,sd-spec-version=3 \
+ -drive if=sd,index=0,file=null-co://,format=raw,id=mydrive \
+ -device sd-card,drive=mydrive -qtest stdio
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2020-17380
+Fixes: CVE-2020-25085
+Fixes: CVE-2021-3409
+Fixes: d7dfca0807a0 ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cornelius Aschermann (Ruhr-Universität Bochum)
+Reported-by: Sergej Schumilo (Ruhr-Universität Bochum)
+Reported-by: Simon Wörner (Ruhr-Universität Bochum)
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909418
+Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1928146
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-3-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-2.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/8be45cc947832b3c02144c9d52921f499f2d77fe ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -1117,15 +1117,17 @@ sdhci_write(void *opaque, hwaddr offset,
+
+ switch (offset & ~0x3) {
+ case SDHC_SYSAD:
+- s->sdmasysad = (s->sdmasysad & mask) | value;
+- MASKED_WRITE(s->sdmasysad, mask, value);
+- /* Writing to last byte of sdmasysad might trigger transfer */
+- if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt &&
+- s->blksize && SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
+- if (s->trnmod & SDHC_TRNS_MULTI) {
+- sdhci_sdma_transfer_multi_blocks(s);
+- } else {
+- sdhci_sdma_transfer_single_block(s);
++ if (!TRANSFERRING_DATA(s->prnsts)) {
++ s->sdmasysad = (s->sdmasysad & mask) | value;
++ MASKED_WRITE(s->sdmasysad, mask, value);
++ /* Writing to last byte of sdmasysad might trigger transfer */
++ if (!(mask & 0xFF000000) && s->blkcnt && s->blksize &&
++ SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
++ if (s->trnmod & SDHC_TRNS_MULTI) {
++ sdhci_sdma_transfer_multi_blocks(s);
++ } else {
++ sdhci_sdma_transfer_single_block(s);
++ }
+ }
+ }
+ break;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-3.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-3.patch
new file mode 100644
index 0000000000..d06ac0ed3c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-3.patch
@@ -0,0 +1,71 @@
+Backport of:
+
+From bc6f28995ff88f5d82c38afcfd65406f0ae375aa Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:37 +0800
+Subject: [PATCH] hw/sd: sdhci: Correctly set the controller status for ADMA
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+When an ADMA transfer is started, the code forgets to set the
+controller status to indicate that a transfer is in progress.
+
+With this fix, the following 2 reproducers:
+
+https://paste.debian.net/plain/1185136
+https://paste.debian.net/plain/1185141
+
+cannot be reproduced with the following QEMU command line:
+
+$ qemu-system-x86_64 -nographic -machine accel=qtest -m 512M \
+ -nodefaults -device sdhci-pci,sd-spec-version=3 \
+ -drive if=sd,index=0,file=null-co://,format=raw,id=mydrive \
+ -device sd-card,drive=mydrive -qtest stdio
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2020-17380
+Fixes: CVE-2020-25085
+Fixes: CVE-2021-3409
+Fixes: d7dfca0807a0 ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cornelius Aschermann (Ruhr-Universität Bochum)
+Reported-by: Sergej Schumilo (Ruhr-Universität Bochum)
+Reported-by: Simon Wörner (Ruhr-Universität Bochum)
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909418
+Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1928146
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-4-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-3.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/bc6f28995ff88f5d82c38afcfd65406f0ae375aa ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -776,8 +776,9 @@ static void sdhci_do_adma(SDHCIState *s)
+
+ switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) {
+ case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */
+-
++ s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
+ if (s->trnmod & SDHC_TRNS_READ) {
++ s->prnsts |= SDHC_DOING_READ;
+ while (length) {
+ if (s->data_count == 0) {
+ for (n = 0; n < block_size; n++) {
+@@ -807,6 +808,7 @@ static void sdhci_do_adma(SDHCIState *s)
+ }
+ }
+ } else {
++ s->prnsts |= SDHC_DOING_WRITE;
+ while (length) {
+ begin = s->data_count;
+ if ((length + begin) < block_size) {
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-4.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-4.patch
new file mode 100644
index 0000000000..2e49e3bc18
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-4.patch
@@ -0,0 +1,52 @@
+Backport of:
+
+From 5cd7aa3451b76bb19c0f6adc2b931f091e5d7fcd Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:38 +0800
+Subject: [PATCH] hw/sd: sdhci: Limit block size only when SDHC_BLKSIZE
+ register is writable
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+The code that limits the maximum block size is only necessary when
+the SDHC_BLKSIZE register is writable.
+
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-5-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-4.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/5cd7aa3451b76bb19c0f6adc2b931f091e5d7fcd ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -1137,15 +1137,15 @@ sdhci_write(void *opaque, hwaddr offset,
+ if (!TRANSFERRING_DATA(s->prnsts)) {
+ MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12));
+ MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
+- }
+
+- /* Limit block size to the maximum buffer size */
+- if (extract32(s->blksize, 0, 12) > s->buf_maxsz) {
+- qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " \
+- "the maximum buffer 0x%x", __func__, s->blksize,
+- s->buf_maxsz);
++ /* Limit block size to the maximum buffer size */
++ if (extract32(s->blksize, 0, 12) > s->buf_maxsz) {
++ qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than "
++ "the maximum buffer 0x%x\n", __func__, s->blksize,
++ s->buf_maxsz);
+
+- s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
++ s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
++ }
+ }
+
+ break;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-5.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-5.patch
new file mode 100644
index 0000000000..7b436809e9
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3409-5.patch
@@ -0,0 +1,93 @@
+From cffb446e8fd19a14e1634c7a3a8b07be3f01d5c9 Mon Sep 17 00:00:00 2001
+From: Bin Meng <bmeng.cn@gmail.com>
+Date: Wed, 3 Mar 2021 20:26:39 +0800
+Subject: [PATCH] hw/sd: sdhci: Reset the data pointer of s->fifo_buffer[] when
+ a different block size is programmed
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf8
+Content-Transfer-Encoding: 8bit
+
+If the block size is programmed to a different value from the
+previous one, reset the data pointer of s->fifo_buffer[] so that
+s->fifo_buffer[] can be filled in using the new block size in
+the next transfer.
+
+With this fix, the following reproducer:
+
+outl 0xcf8 0x80001010
+outl 0xcfc 0xe0000000
+outl 0xcf8 0x80001001
+outl 0xcfc 0x06000000
+write 0xe000002c 0x1 0x05
+write 0xe0000005 0x1 0x02
+write 0xe0000007 0x1 0x01
+write 0xe0000028 0x1 0x10
+write 0x0 0x1 0x23
+write 0x2 0x1 0x08
+write 0xe000000c 0x1 0x01
+write 0xe000000e 0x1 0x20
+write 0xe000000f 0x1 0x00
+write 0xe000000c 0x1 0x32
+write 0xe0000004 0x2 0x0200
+write 0xe0000028 0x1 0x00
+write 0xe0000003 0x1 0x40
+
+cannot be reproduced with the following QEMU command line:
+
+$ qemu-system-x86_64 -nographic -machine accel=qtest -m 512M \
+ -nodefaults -device sdhci-pci,sd-spec-version=3 \
+ -drive if=sd,index=0,file=null-co://,format=raw,id=mydrive \
+ -device sd-card,drive=mydrive -qtest stdio
+
+Cc: qemu-stable@nongnu.org
+Fixes: CVE-2020-17380
+Fixes: CVE-2020-25085
+Fixes: CVE-2021-3409
+Fixes: d7dfca0807a0 ("hw/sdhci: introduce standard SD host controller")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Reported-by: Cornelius Aschermann (Ruhr-Universität Bochum)
+Reported-by: Sergej Schumilo (Ruhr-Universität Bochum)
+Reported-by: Simon Wörner (Ruhr-Universität Bochum)
+Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
+Buglink: https://bugs.launchpad.net/qemu/+bug/1909418
+Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1928146
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
+Message-Id: <20210303122639.20004-6-bmeng.cn@gmail.com>
+Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+
+CVE: CVE-2021-3409 CVE-2020-17380
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2021-3409-5.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/cffb446e8fd19a14e1634c7a3a8b07be3f01d5c9 ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/sd/sdhci.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/hw/sd/sdhci.c
++++ b/hw/sd/sdhci.c
+@@ -1135,6 +1135,8 @@ sdhci_write(void *opaque, hwaddr offset,
+ break;
+ case SDHC_BLKSIZE:
+ if (!TRANSFERRING_DATA(s->prnsts)) {
++ uint16_t blksize = s->blksize;
++
+ MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12));
+ MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
+
+@@ -1146,6 +1148,16 @@ sdhci_write(void *opaque, hwaddr offset,
+
+ s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
+ }
++
++ /*
++ * If the block size is programmed to a different value from
++ * the previous one, reset the data pointer of s->fifo_buffer[]
++ * so that s->fifo_buffer[] can be filled in using the new block
++ * size in the next transfer.
++ */
++ if (blksize != s->blksize) {
++ s->data_count = 0;
++ }
+ }
+
+ break;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3507.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3507.patch
new file mode 100644
index 0000000000..4ff3413f8e
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3507.patch
@@ -0,0 +1,87 @@
+From defac5e2fbddf8423a354ff0454283a2115e1367 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@redhat.com>
+Date: Thu, 18 Nov 2021 12:57:32 +0100
+Subject: [PATCH] hw/block/fdc: Prevent end-of-track overrun (CVE-2021-3507)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Per the 82078 datasheet, if the end-of-track (EOT byte in
+the FIFO) is more than the number of sectors per side, the
+command is terminated unsuccessfully:
+
+* 5.2.5 DATA TRANSFER TERMINATION
+
+ The 82078 supports terminal count explicitly through
+ the TC pin and implicitly through the underrun/over-
+ run and end-of-track (EOT) functions. For full sector
+ transfers, the EOT parameter can define the last
+ sector to be transferred in a single or multisector
+ transfer. If the last sector to be transferred is a par-
+ tial sector, the host can stop transferring the data in
+ mid-sector, and the 82078 will continue to complete
+ the sector as if a hardware TC was received. The
+ only difference between these implicit functions and
+ TC is that they return "abnormal termination" result
+ status. Such status indications can be ignored if they
+ were expected.
+
+* 6.1.3 READ TRACK
+
+ This command terminates when the EOT specified
+ number of sectors have been read. If the 82078
+ does not find an ID Address Mark on the diskette
+ after the second occurrence of a pulse on the
+ INDX# pin, then it sets the IC code in Status Regis-
+ ter 0 to "01" (Abnormal termination), sets the MA bit
+ in Status Register 1 to "1", and terminates the com-
+ mand.
+
+* 6.1.6 VERIFY
+
+ Refer to Table 6-6 and Table 6-7 for information
+ concerning the values of MT and EC versus SC and
+ EOT value.
+
+* Table 6-6. Result Phase Table
+
+* Table 6-7. Verify Command Result Phase Table
+
+Fix by aborting the transfer when EOT > # Sectors Per Side.
+
+Cc: qemu-stable@nongnu.org
+Cc: Hervé Poussineau <hpoussin@reactos.org>
+Fixes: baca51faff0 ("floppy driver: disk geometry auto detect")
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/339
+Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20211118115733.4038610-2-philmd@redhat.com>
+Reviewed-by: Hanna Reitz <hreitz@redhat.com>
+Signed-off-by: Kevin Wolf <kwolf@redhat.com>
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/defac5e2fbddf8423a354ff0454283a2115e1367]
+CVE: CVE-2021-3507
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ hw/block/fdc.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/hw/block/fdc.c b/hw/block/fdc.c
+index 347875a0cdae..57bb355794a9 100644
+--- a/hw/block/fdc.c
++++ b/hw/block/fdc.c
+@@ -1530,6 +1530,14 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction)
+ int tmp;
+ fdctrl->data_len = 128 << (fdctrl->fifo[5] > 7 ? 7 : fdctrl->fifo[5]);
+ tmp = (fdctrl->fifo[6] - ks + 1);
++ if (tmp < 0) {
++ FLOPPY_DPRINTF("invalid EOT: %d\n", tmp);
++ fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_MA, 0x00);
++ fdctrl->fifo[3] = kt;
++ fdctrl->fifo[4] = kh;
++ fdctrl->fifo[5] = ks;
++ return;
++ }
+ if (fdctrl->fifo[0] & 0x80)
+ tmp += fdctrl->fifo[6];
+ fdctrl->data_len *= tmp;
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch
new file mode 100644
index 0000000000..6e7af8540a
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3638.patch
@@ -0,0 +1,80 @@
+From b68d13531d8882ba66994b9f767b6a8f822464f3 Mon Sep 17 00:00:00 2001
+From: Vivek Kumbhar <vkumbhar@mvista.com>
+Date: Fri, 11 Nov 2022 12:43:26 +0530
+Subject: [PATCH] CVE-2021-3638
+
+Upstream-Status: Backport [https://lists.nongnu.org/archive/html/qemu-devel/2021-09/msg01682.html]
+CVE: CVE-2021-3638
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+When building QEMU with DEBUG_ATI defined then running with
+'-device ati-vga,romfile="" -d unimp,guest_errors -trace ati\*'
+we get:
+
+ ati_mm_write 4 0x16c0 DP_CNTL <- 0x1
+ ati_mm_write 4 0x146c DP_GUI_MASTER_CNTL <- 0x2
+ ati_mm_write 4 0x16c8 DP_MIX <- 0xff0000
+ ati_mm_write 4 0x16c4 DP_DATATYPE <- 0x2
+ ati_mm_write 4 0x224 CRTC_OFFSET <- 0x0
+ ati_mm_write 4 0x142c DST_PITCH_OFFSET <- 0xfe00000
+ ati_mm_write 4 0x1420 DST_Y <- 0x3fff
+ ati_mm_write 4 0x1410 DST_HEIGHT <- 0x3fff
+ ati_mm_write 4 0x1588 DST_WIDTH_X <- 0x3fff3fff
+ ati_2d_blt: vram:0x7fff5fa00000 addr:0 ds:0x7fff61273800 stride:2560 bpp:32
+rop:0xff
+ ati_2d_blt: 0 0 0, 0 127 0, (0,0) -> (16383,16383) 16383x16383 > ^
+ ati_2d_blt: pixman_fill(dst:0x7fff5fa00000, stride:254, bpp:8, x:16383,
+y:16383, w:16383, h:16383, xor:0xff000000)
+ Thread 3 "qemu-system-i38" received signal SIGSEGV, Segmentation fault.
+ (gdb) bt
+ #0 0x00007ffff7f62ce0 in sse2_fill.lto_priv () at /lib64/libpixman-1.so.0
+ #1 0x00007ffff7f09278 in pixman_fill () at /lib64/libpixman-1.so.0
+ #2 0x0000555557b5a9af in ati_2d_blt (s=0x631000028800) at
+hw/display/ati_2d.c:196
+ #3 0x0000555557b4b5a2 in ati_mm_write (opaque=0x631000028800, addr=5512,
+data=1073692671, size=4) at hw/display/ati.c:843
+ #4 0x0000555558b90ec4 in memory_region_write_accessor (mr=0x631000039cc0,
+addr=5512, ..., size=4, ...) at softmmu/memory.c:492
+
+Commit 584acf34cb0 ("ati-vga: Fix reverse bit blts") introduced
+the local dst_x and dst_y which adjust the (x, y) coordinates
+depending on the direction in the SRCCOPY ROP3 operation, but
+forgot to address the same issue for the PATCOPY, BLACKNESS and
+WHITENESS operations, which also call pixman_fill().
+
+Fix that now by using the adjusted coordinates in the pixman_fill
+call, and update the related debug printf().
+---
+ hw/display/ati_2d.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c
+index 4dc10ea7..692bec91 100644
+--- a/hw/display/ati_2d.c
++++ b/hw/display/ati_2d.c
+@@ -84,7 +84,7 @@ void ati_2d_blt(ATIVGAState *s)
+ DPRINTF("%d %d %d, %d %d %d, (%d,%d) -> (%d,%d) %dx%d %c %c\n",
+ s->regs.src_offset, s->regs.dst_offset, s->regs.default_offset,
+ s->regs.src_pitch, s->regs.dst_pitch, s->regs.default_pitch,
+- s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y,
++ s->regs.src_x, s->regs.src_y, dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? '>' : '<'),
+ (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? 'v' : '^'));
+@@ -180,11 +180,11 @@ void ati_2d_blt(ATIVGAState *s)
+ dst_stride /= sizeof(uint32_t);
+ DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n",
+ dst_bits, dst_stride, bpp,
+- s->regs.dst_x, s->regs.dst_y,
++ dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ filler);
+ pixman_fill((uint32_t *)dst_bits, dst_stride, bpp,
+- s->regs.dst_x, s->regs.dst_y,
++ dst_x, dst_y,
+ s->regs.dst_width, s->regs.dst_height,
+ filler);
+ if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr &&
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3713.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3713.patch
new file mode 100644
index 0000000000..cdd9c38db9
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3713.patch
@@ -0,0 +1,67 @@
+From a114d6baedf2cccb454a46d36e399fec1bc3e1c0 Mon Sep 17 00:00:00 2001
+From: Gerd Hoffmann <kraxel@redhat.com>
+Date: Wed, 18 Aug 2021 14:05:05 +0200
+Subject: [PATCH] uas: add stream number sanity checks.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The device uses the guest-supplied stream number unchecked, which can
+lead to guest-triggered out-of-band access to the UASDevice->data3 and
+UASDevice->status3 fields. Add the missing checks.
+
+Fixes: CVE-2021-3713
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Reported-by: Chen Zhe <chenzhe@huawei.com>
+Reported-by: Tan Jingguo <tanjingguo@huawei.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Message-Id: <20210818120505.1258262-2-kraxel@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/13b250b12ad3c59114a6a17d59caf073ce45b33a
+CVE: CVE-2021-3713
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/usb/dev-uas.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c
+index 6d6d1073..0b8cd4dd 100644
+--- a/hw/usb/dev-uas.c
++++ b/hw/usb/dev-uas.c
+@@ -830,6 +830,9 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p)
+ }
+ break;
+ case UAS_PIPE_ID_STATUS:
++ if (p->stream > UAS_MAX_STREAMS) {
++ goto err_stream;
++ }
+ if (p->stream) {
+ QTAILQ_FOREACH(st, &uas->results, next) {
+ if (st->stream == p->stream) {
+@@ -857,6 +860,9 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p)
+ break;
+ case UAS_PIPE_ID_DATA_IN:
+ case UAS_PIPE_ID_DATA_OUT:
++ if (p->stream > UAS_MAX_STREAMS) {
++ goto err_stream;
++ }
+ if (p->stream) {
+ req = usb_uas_find_request(uas, p->stream);
+ } else {
+@@ -892,6 +898,11 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p)
+ p->status = USB_RET_STALL;
+ break;
+ }
++
++err_stream:
++ error_report("%s: invalid stream %d", __func__, p->stream);
++ p->status = USB_RET_STALL;
++ return;
+ }
+
+ static void usb_uas_unrealize(USBDevice *dev, Error **errp)
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch
new file mode 100644
index 0000000000..b291ade4e3
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3748.patch
@@ -0,0 +1,124 @@
+From bedd7e93d01961fcb16a97ae45d93acf357e11f6 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Thu, 2 Sep 2021 13:44:12 +0800
+Subject: [PATCH] virtio-net: fix use after unmap/free for sg
+
+When mergeable buffers are enabled, we try to set the num_buffers after
+the virtqueue elem has been unmapped. This leads to several issues,
+e.g. a use-after-free when the descriptor has an address which belongs
+to the non-direct access region. In this case we use a bounce buffer
+that is allocated during address_space_map() and freed during
+address_space_unmap().
+
+Fix this by storing the elems temporarily in an array and delaying the
+unmap until after we set the num_buffers.
+
+This addresses CVE-2021-3748.
+
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Fixes: fbe78f4f55c6 ("virtio-net support")
+Cc: qemu-stable@nongnu.org
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+
+https://github.com/qemu/qemu/commit/bedd7e93d01961fcb16a97ae45d93acf357e11f6
+CVE: CVE-2021-3748
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/net/virtio-net.c | 39 ++++++++++++++++++++++++++++++++-------
+ 1 file changed, 32 insertions(+), 7 deletions(-)
+
+diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
+index 16d20cdee52a..f205331dcf8c 100644
+--- a/hw/net/virtio-net.c
++++ b/hw/net/virtio-net.c
+@@ -1746,10 +1746,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ VirtIONet *n = qemu_get_nic_opaque(nc);
+ VirtIONetQueue *q = virtio_net_get_subqueue(nc);
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
++ VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
++ size_t lens[VIRTQUEUE_MAX_SIZE];
+ struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
+ struct virtio_net_hdr_mrg_rxbuf mhdr;
+ unsigned mhdr_cnt = 0;
+- size_t offset, i, guest_offset;
++ size_t offset, i, guest_offset, j;
++ ssize_t err;
+
+ if (!virtio_net_can_receive(nc)) {
+ return -1;
+@@ -1780,6 +1783,12 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+
+ total = 0;
+
++ if (i == VIRTQUEUE_MAX_SIZE) {
++ virtio_error(vdev, "virtio-net unexpected long buffer chain");
++ err = size;
++ goto err;
++ }
++
+ elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ if (i) {
+@@ -1791,7 +1800,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ n->guest_hdr_len, n->host_hdr_len,
+ vdev->guest_features);
+ }
+- return -1;
++ err = -1;
++ goto err;
+ }
+
+ if (elem->in_num < 1) {
+@@ -1799,7 +1809,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ "virtio-net receive queue contains no in buffers");
+ virtqueue_detach_element(q->rx_vq, elem, 0);
+ g_free(elem);
+- return -1;
++ err = -1;
++ goto err;
+ }
+
+ sg = elem->in_sg;
+@@ -1836,12 +1847,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ if (!n->mergeable_rx_bufs && offset < size) {
+ virtqueue_unpop(q->rx_vq, elem, total);
+ g_free(elem);
+- return size;
++ err = size;
++ goto err;
+ }
+
+- /* signal other side */
+- virtqueue_fill(q->rx_vq, elem, total, i++);
+- g_free(elem);
++ elems[i] = elem;
++ lens[i] = total;
++ i++;
+ }
+
+ if (mhdr_cnt) {
+@@ -1851,10 +1863,23 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
+ &mhdr.num_buffers, sizeof mhdr.num_buffers);
+ }
+
++ for (j = 0; j < i; j++) {
++ /* signal other side */
++ virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
++ g_free(elems[j]);
++ }
++
+ virtqueue_flush(q->rx_vq, i);
+ virtio_notify(vdev, q->rx_vq);
+
+ return size;
++
++err:
++ for (j = 0; j < i; j++) {
++ g_free(elems[j]);
++ }
++
++ return err;
+ }
+
+ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3750.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750.patch
new file mode 100644
index 0000000000..43630e71fb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3750.patch
@@ -0,0 +1,180 @@
+From 1938fbc7ec197e2612ab2ce36dd69bff19208aa5 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 10 Oct 2022 17:44:41 +0530
+Subject: [PATCH] CVE-2021-3750
+
+Upstream-Status: Backport [https://git.qemu.org/?p=qemu.git;a=commit;h=b9d383ab797f54ae5fa8746117770709921dc529 && https://git.qemu.org/?p=qemu.git;a=commit;h=3ab6fdc91b72e156da22848f0003ff4225690ced && https://git.qemu.org/?p=qemu.git;a=commit;h=58e74682baf4e1ad26b064d8c02e5bc99c75c5d9]
+CVE: CVE-2021-3750
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ exec.c | 55 +++++++++++++++++++++++++++++++-------
+ hw/intc/arm_gicv3_redist.c | 4 +--
+ include/exec/memattrs.h | 9 +++++++
+ 3 files changed, 56 insertions(+), 12 deletions(-)
+
+diff --git a/exec.c b/exec.c
+index 1360051a..10581d8d 100644
+--- a/exec.c
++++ b/exec.c
+@@ -39,6 +39,7 @@
+ #include "qemu/config-file.h"
+ #include "qemu/error-report.h"
+ #include "qemu/qemu-print.h"
++#include "qemu/log.h"
+ #if defined(CONFIG_USER_ONLY)
+ #include "qemu.h"
+ #else /* !CONFIG_USER_ONLY */
+@@ -3118,6 +3119,33 @@ static bool prepare_mmio_access(MemoryRegion *mr)
+ return release_lock;
+ }
+
++/**
++ * flatview_access_allowed
++ * @mr: #MemoryRegion to be accessed
++ * @attrs: memory transaction attributes
++ * @addr: address within that memory region
++ * @len: the number of bytes to access
++ *
++ * Check if a memory transaction is allowed.
++ *
++ * Returns: true if transaction is allowed, false if denied.
++ */
++static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs,
++ hwaddr addr, hwaddr len)
++{
++ if (likely(!attrs.memory)) {
++ return true;
++ }
++ if (memory_region_is_ram(mr)) {
++ return true;
++ }
++ qemu_log_mask(LOG_GUEST_ERROR,
++ "Invalid access to non-RAM device at "
++ "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", "
++ "region '%s'\n", addr, len, memory_region_name(mr));
++ return false;
++}
++
+ /* Called within RCU critical section. */
+ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
+ MemTxAttrs attrs,
+@@ -3131,7 +3159,10 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
+ bool release_lock = false;
+
+ for (;;) {
+- if (!memory_access_is_direct(mr, true)) {
++ if (!flatview_access_allowed(mr, attrs, addr1, l)) {
++ result |= MEMTX_ACCESS_ERROR;
++ /* Keep going. */
++ } else if (!memory_access_is_direct(mr, true)) {
+ release_lock |= prepare_mmio_access(mr);
+ l = memory_access_size(mr, l, addr1);
+ /* XXX: could force current_cpu to NULL to avoid
+@@ -3173,14 +3204,14 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
+ hwaddr l;
+ hwaddr addr1;
+ MemoryRegion *mr;
+- MemTxResult result = MEMTX_OK;
+
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
+- result = flatview_write_continue(fv, addr, attrs, buf, len,
+- addr1, l, mr);
+-
+- return result;
++ if (!flatview_access_allowed(mr, attrs, addr, len)) {
++ return MEMTX_ACCESS_ERROR;
++ }
++ return flatview_write_continue(fv, addr, attrs, buf, len,
++ addr1, l, mr);
+ }
+
+ /* Called within RCU critical section. */
+@@ -3195,7 +3226,10 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
+ bool release_lock = false;
+
+ for (;;) {
+- if (!memory_access_is_direct(mr, false)) {
++ if (!flatview_access_allowed(mr, attrs, addr1, l)) {
++ result |= MEMTX_ACCESS_ERROR;
++ /* Keep going. */
++ } else if (!memory_access_is_direct(mr, false)) {
+ /* I/O case */
+ release_lock |= prepare_mmio_access(mr);
+ l = memory_access_size(mr, l, addr1);
+@@ -3238,6 +3272,9 @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
+
+ l = len;
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
++ if (!flatview_access_allowed(mr, attrs, addr, len)) {
++ return MEMTX_ACCESS_ERROR;
++ }
+ return flatview_read_continue(fv, addr, attrs, buf, len,
+ addr1, l, mr);
+ }
+@@ -3474,12 +3511,10 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs)
+ {
+ FlatView *fv;
+- bool result;
+
+ RCU_READ_LOCK_GUARD();
+ fv = address_space_to_flatview(as);
+- result = flatview_access_valid(fv, addr, len, is_write, attrs);
+- return result;
++ return flatview_access_valid(fv, addr, len, is_write, attrs);
+ }
+
+ static hwaddr
+diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
+index 8645220d..44368e28 100644
+--- a/hw/intc/arm_gicv3_redist.c
++++ b/hw/intc/arm_gicv3_redist.c
+@@ -450,7 +450,7 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
+ break;
+ }
+
+- if (r == MEMTX_ERROR) {
++ if (r != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest read at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+@@ -507,7 +507,7 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
+ break;
+ }
+
+- if (r == MEMTX_ERROR) {
++ if (r != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid guest write at offset " TARGET_FMT_plx
+ "size %u\n", __func__, offset, size);
+diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h
+index 95f2d20d..9fb98bc1 100644
+--- a/include/exec/memattrs.h
++++ b/include/exec/memattrs.h
+@@ -35,6 +35,14 @@ typedef struct MemTxAttrs {
+ unsigned int secure:1;
+ /* Memory access is usermode (unprivileged) */
+ unsigned int user:1;
++ /*
++ * Bus interconnect and peripherals can access anything (memories,
++ * devices) by default. By setting the 'memory' bit, bus transactions
++ * are restricted to "normal" memories (per the AMBA documentation)
++ * versus devices. Access to devices will be logged and rejected
++ * (see MEMTX_ACCESS_ERROR).
++ */
++ unsigned int memory:1;
+ /* Requester ID (for MSI for example) */
+ unsigned int requester_id:16;
+ /* Invert endianness for this page */
+@@ -66,6 +74,7 @@ typedef struct MemTxAttrs {
+ #define MEMTX_OK 0
+ #define MEMTX_ERROR (1U << 0) /* device returned an error */
+ #define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
++#define MEMTX_ACCESS_ERROR (1U << 2) /* access denied */
+ typedef uint32_t MemTxResult;
+
+ #endif
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
new file mode 100644
index 0000000000..a1862f1226
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3929.patch
@@ -0,0 +1,81 @@
+From 2c682b5975b41495f98cc34b8243042c446eec44 Mon Sep 17 00:00:00 2001
+From: Gaurav Gupta <gauragup@cisco.com>
+Date: Wed, 29 Mar 2023 14:36:16 -0700
+Subject: [PATCH] hw/nvme: fix CVE-2021-3929
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
+device itself. This still allows DMA to MMIO regions of other devices
+(e.g. doing P2P DMA to the controller memory buffer of another NVMe
+device).
+
+Fixes: CVE-2021-3929
+Reported-by: Qiuhao Li <Qiuhao.Li@outlook.com>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
+Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
+
+Upstream-Status: Backport
+[https://gitlab.com/qemu-project/qemu/-/commit/736b01642d85be832385]
+CVE: CVE-2021-3929
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+Signed-off-by: Gaurav Gupta <gauragup@cisco.com>
+---
+ hw/block/nvme.c | 23 +++++++++++++++++++++++
+ hw/block/nvme.h | 1 +
+ 2 files changed, 24 insertions(+)
+
+diff --git a/hw/block/nvme.c b/hw/block/nvme.c
+index bda446d..ae9b19f 100644
+--- a/hw/block/nvme.c
++++ b/hw/block/nvme.c
+@@ -60,8 +60,31 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
+ return addr >= low && addr < hi;
+ }
+
++static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr)
++{
++ hwaddr hi, lo;
++
++ /*
++ * The purpose of this check is to guard against invalid "local" access to
++ * the iomem (i.e. controller registers). Thus, we check against the range
++ * covered by the 'bar0' MemoryRegion since that is currently composed of
++ * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however,
++ * that if the device model is ever changed to allow the CMB to be located
++ * in BAR0 as well, then this must be changed.
++ */
++ lo = n->bar0.addr;
++ hi = lo + int128_get64(n->bar0.size);
++
++ return addr >= lo && addr < hi;
++}
++
+ static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+ {
++
++ if (nvme_addr_is_iomem(n, addr)) {
++ return NVME_DATA_TRAS_ERROR;
++ }
++
+ if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
+ memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+ return 0;
+diff --git a/hw/block/nvme.h b/hw/block/nvme.h
+index 557194e..5a2b119 100644
+--- a/hw/block/nvme.h
++++ b/hw/block/nvme.h
+@@ -59,6 +59,7 @@ typedef struct NvmeNamespace {
+
+ typedef struct NvmeCtrl {
+ PCIDevice parent_obj;
++ MemoryRegion bar0;
+ MemoryRegion iomem;
+ MemoryRegion ctrl_mem;
+ NvmeBar bar;
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-3930.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-3930.patch
new file mode 100644
index 0000000000..b1b5558647
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-3930.patch
@@ -0,0 +1,53 @@
+From b3af7fdf9cc537f8f0dd3e2423d83f5c99a457e8 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Thu, 4 Nov 2021 17:31:38 +0100
+Subject: [PATCH] hw/scsi/scsi-disk: MODE_PAGE_ALLS not allowed in MODE SELECT
+ commands
+
+This avoids an off-by-one read of 'mode_sense_valid' buffer in
+hw/scsi/scsi-disk.c:mode_sense_page().
+
+Fixes: CVE-2021-3930
+Cc: qemu-stable@nongnu.org
+Reported-by: Alexander Bulekov <alxndr@bu.edu>
+Fixes: a8f4bbe2900 ("scsi-disk: store valid mode pages in a table")
+Fixes: #546
+Reported-by: Qiuhao Li <Qiuhao.Li@outlook.com>
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/b3af7fdf9cc537f8f0dd3e2423d83f5c99a457e8
+CVE: CVE-2021-3930
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/scsi-disk.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
+index e8a547dbb7..d4914178ea 100644
+--- a/hw/scsi/scsi-disk.c
++++ b/hw/scsi/scsi-disk.c
+@@ -1087,6 +1087,7 @@ static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
+ uint8_t *p = *p_outbuf + 2;
+ int length;
+
++ assert(page < ARRAY_SIZE(mode_sense_valid));
+ if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
+ return -1;
+ }
+@@ -1428,6 +1429,11 @@ static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
+ return -1;
+ }
+
++ /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
++ if (page == MODE_PAGE_ALLS) {
++ return -1;
++ }
++
+ p = mode_current;
+ memset(mode_current, 0, inlen + 2);
+ len = mode_sense_page(s, page, &p, 0);
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-4206.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-4206.patch
new file mode 100644
index 0000000000..80ad49e4ed
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-4206.patch
@@ -0,0 +1,89 @@
+From fa892e9abb728e76afcf27323ab29c57fb0fe7aa Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Thu, 7 Apr 2022 10:17:12 +0200
+Subject: [PATCH] ui/cursor: fix integer overflow in cursor_alloc
+ (CVE-2021-4206)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Prevent potential integer overflow by limiting 'width' and 'height' to
+512x512. Also change 'datasize' type to size_t. Refer to security
+advisory https://starlabs.sg/advisories/22-4206/ for more information.
+
+Fixes: CVE-2021-4206
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20220407081712.345609-1-mcascell@redhat.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/fa892e9a
+CVE: CVE-2021-4206
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/display/qxl-render.c | 7 +++++++
+ hw/display/vmware_vga.c | 2 ++
+ ui/cursor.c | 8 +++++++-
+ 3 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
+index 237ed293ba..ca217004bf 100644
+--- a/hw/display/qxl-render.c
++++ b/hw/display/qxl-render.c
+@@ -247,6 +247,13 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor,
+ size_t size;
+
+ c = cursor_alloc(cursor->header.width, cursor->header.height);
++
++ if (!c) {
++ qxl_set_guest_bug(qxl, "%s: cursor %ux%u alloc error", __func__,
++ cursor->header.width, cursor->header.height);
++ goto fail;
++ }
++
+ c->hot_x = cursor->header.hot_spot_x;
+ c->hot_y = cursor->header.hot_spot_y;
+ switch (cursor->header.type) {
+diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
+index 98c83474ad..45d06cbe25 100644
+--- a/hw/display/vmware_vga.c
++++ b/hw/display/vmware_vga.c
+@@ -515,6 +515,8 @@ static inline void vmsvga_cursor_define(struct vmsvga_state_s *s,
+ int i, pixels;
+
+ qc = cursor_alloc(c->width, c->height);
++ assert(qc != NULL);
++
+ qc->hot_x = c->hot_x;
+ qc->hot_y = c->hot_y;
+ switch (c->bpp) {
+diff --git a/ui/cursor.c b/ui/cursor.c
+index 1d62ddd4d0..835f0802f9 100644
+--- a/ui/cursor.c
++++ b/ui/cursor.c
+@@ -46,6 +46,8 @@ static QEMUCursor *cursor_parse_xpm(const char *xpm[])
+
+ /* parse pixel data */
+ c = cursor_alloc(width, height);
++ assert(c != NULL);
++
+ for (pixel = 0, y = 0; y < height; y++, line++) {
+ for (x = 0; x < height; x++, pixel++) {
+ idx = xpm[line][x];
+@@ -91,7 +93,11 @@ QEMUCursor *cursor_builtin_left_ptr(void)
+ QEMUCursor *cursor_alloc(int width, int height)
+ {
+ QEMUCursor *c;
+- int datasize = width * height * sizeof(uint32_t);
++ size_t datasize = width * height * sizeof(uint32_t);
++
++ if (width > 512 || height > 512) {
++ return NULL;
++ }
+
+ c = g_malloc0(sizeof(QEMUCursor) + datasize);
+ c->width = width;
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2021-4207.patch b/meta/recipes-devtools/qemu/qemu/CVE-2021-4207.patch
new file mode 100644
index 0000000000..8418246247
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2021-4207.patch
@@ -0,0 +1,43 @@
+From 9569f5cb5b4bffa9d3ebc8ba7da1e03830a9a895 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Thu, 7 Apr 2022 10:11:06 +0200
+Subject: [PATCH] display/qxl-render: fix race condition in qxl_cursor
+ (CVE-2021-4207)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Avoid fetching 'width' and 'height' a second time to prevent a possible
+race condition. Refer to security advisory
+https://starlabs.sg/advisories/22-4207/ for more information.
+
+Fixes: CVE-2021-4207
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
+Message-Id: <20220407081106.343235-1-mcascell@redhat.com>
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/9569f5cb
+CVE: CVE-2021-4207
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/display/qxl-render.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
+index d28849b121..237ed293ba 100644
+--- a/hw/display/qxl-render.c
++++ b/hw/display/qxl-render.c
+@@ -266,7 +266,7 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor,
+ }
+ break;
+ case SPICE_CURSOR_TYPE_ALPHA:
+- size = sizeof(uint32_t) * cursor->header.width * cursor->header.height;
++ size = sizeof(uint32_t) * c->width * c->height;
+ qxl_unpack_chunks(c->data, size, qxl, &cursor->chunk, group_id);
+ if (qxl->debug > 2) {
+ cursor_print_ascii_art(c, "qxl/alpha");
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-1.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-1.patch
new file mode 100644
index 0000000000..6a7ce0e26c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-1.patch
@@ -0,0 +1,42 @@
+From 6c8fa961da5e60f574bb52fd3ad44b1e9e8ad4b8 Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Tue, 5 Jul 2022 22:05:43 +0200
+Subject: [PATCH] scsi/lsi53c895a: fix use-after-free in lsi_do_msgout
+ (CVE-2022-0216)
+
+Set current_req->req to NULL to prevent reusing a free'd buffer in case of
+repeated SCSI cancel requests. Thanks to Thomas Huth for suggesting the patch.
+
+Fixes: CVE-2022-0216
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/972
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Reviewed-by: Thomas Huth <thuth@redhat.com>
+Message-Id: <20220705200543.2366809-1-mcascell@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/6c8fa961da5e60f574bb52fd3ad44b1e9e8ad4b8
+CVE: CVE-2022-0216
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/lsi53c895a.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
+index c8773f73f7..99ea42d49b 100644
+--- a/hw/scsi/lsi53c895a.c
++++ b/hw/scsi/lsi53c895a.c
+@@ -1028,8 +1028,9 @@ static void lsi_do_msgout(LSIState *s)
+ case 0x0d:
+ /* The ABORT TAG message clears the current I/O process only. */
+ trace_lsi_do_msgout_abort(current_tag);
+- if (current_req) {
++ if (current_req && current_req->req) {
+ scsi_req_cancel(current_req->req);
++ current_req->req = NULL;
+ }
+ lsi_disconnect(s);
+ break;
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-2.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-2.patch
new file mode 100644
index 0000000000..137906cd30
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-0216-2.patch
@@ -0,0 +1,52 @@
+From 4367a20cc442c56b05611b4224de9a61908f9eac Mon Sep 17 00:00:00 2001
+From: Mauro Matteo Cascella <mcascell@redhat.com>
+Date: Mon, 11 Jul 2022 14:33:16 +0200
+Subject: [PATCH] scsi/lsi53c895a: really fix use-after-free in lsi_do_msgout
+ (CVE-2022-0216)
+
+Set current_req to NULL, not current_req->req, to prevent reusing a free'd
+buffer in case of repeated SCSI cancel requests. Also apply the fix to
+CLEAR QUEUE and BUS DEVICE RESET messages as well, since they also cancel
+the request.
+
+Thanks to Alexander Bulekov for providing a reproducer.
+
+Fixes: CVE-2022-0216
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/972
+Signed-off-by: Mauro Matteo Cascella <mcascell@redhat.com>
+Tested-by: Alexander Bulekov <alxndr@bu.edu>
+Message-Id: <20220711123316.421279-1-mcascell@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+https://gitlab.com/qemu-project/qemu/-/commit/4367a20cc4
+CVE: CVE-2022-0216
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/scsi/lsi53c895a.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
+index 99ea42d49b..ad5f5e5f39 100644
+--- a/hw/scsi/lsi53c895a.c
++++ b/hw/scsi/lsi53c895a.c
+@@ -1030,7 +1030,7 @@ static void lsi_do_msgout(LSIState *s)
+ trace_lsi_do_msgout_abort(current_tag);
+ if (current_req && current_req->req) {
+ scsi_req_cancel(current_req->req);
+- current_req->req = NULL;
++ current_req = NULL;
+ }
+ lsi_disconnect(s);
+ break;
+@@ -1056,6 +1056,7 @@ static void lsi_do_msgout(LSIState *s)
+ /* clear the current I/O process */
+ if (s->current) {
+ scsi_req_cancel(s->current->req);
++ current_req = NULL;
+ }
+
+ /* As the current implemented devices scsi_disk and scsi_generic
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-26354.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-26354.patch
new file mode 100644
index 0000000000..fc4d6cf3df
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-26354.patch
@@ -0,0 +1,57 @@
+Backport of:
+
+From 8d1b247f3748ac4078524130c6d7ae42b6140aaf Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Mon, 28 Feb 2022 10:50:58 +0100
+Subject: [PATCH] vhost-vsock: detach the virqueue element in case of error
+
+In vhost_vsock_common_send_transport_reset(), if an element popped from
+the virtqueue is invalid, we should call virtqueue_detach_element() to
+detach it from the virtqueue before freeing its memory.
+
+Fixes: fc0b9b0e1c ("vhost-vsock: add virtio sockets device")
+Fixes: CVE-2022-26354
+Cc: qemu-stable@nongnu.org
+Reported-by: VictorV <vv474172261@gmail.com>
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Message-Id: <20220228095058.27899-1-sgarzare@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+CVE: CVE-2022-26354
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2022-26354.patch?h=ubuntu/focal-security Upstream commit https://github.com/qemu/qemu/commit/8d1b247f3748ac4078524130c6d7ae42b6140aaf ]
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ hw/virtio/vhost-vsock.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/hw/virtio/vhost-vsock.c
++++ b/hw/virtio/vhost-vsock.c
+@@ -221,19 +221,23 @@ static void vhost_vsock_send_transport_r
+ if (elem->out_num) {
+ error_report("invalid vhost-vsock event virtqueue element with "
+ "out buffers");
+- goto out;
++ goto err;
+ }
+
+ if (iov_from_buf(elem->in_sg, elem->in_num, 0,
+ &event, sizeof(event)) != sizeof(event)) {
+ error_report("vhost-vsock event virtqueue element is too short");
+- goto out;
++ goto err;
+ }
+
+ virtqueue_push(vq, elem, sizeof(event));
+ virtio_notify(VIRTIO_DEVICE(vsock), vq);
+
+-out:
++ g_free(elem);
++ return;
++
++err:
++ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ }
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch
new file mode 100644
index 0000000000..4196ebcf98
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-35414.patch
@@ -0,0 +1,53 @@
+From 09a07b5b39c87423df9e8f6574c19a14d36beac5 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 27 Jul 2022 10:34:12 +0530
+Subject: [PATCH] CVE-2022-35414
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/418ade7849ce7641c0f7333718caf5091a02fd4c]
+CVE: CVE-2022-35414
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ exec.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/exec.c b/exec.c
+index 43c70ffb..2d6add46 100644
+--- a/exec.c
++++ b/exec.c
+@@ -685,7 +685,7 @@ static void tcg_iommu_free_notifier_list(CPUState *cpu)
+
+ /* Called from RCU critical section */
+ MemoryRegionSection *
+-address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
++address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
+ hwaddr *xlat, hwaddr *plen,
+ MemTxAttrs attrs, int *prot)
+ {
+@@ -694,6 +694,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ IOMMUMemoryRegionClass *imrc;
+ IOMMUTLBEntry iotlb;
+ int iommu_idx;
++ hwaddr addr = orig_addr;
+ AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+
+ for (;;) {
+@@ -737,6 +738,16 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ return section;
+
+ translate_fail:
++ /*
++ * We should be given a page-aligned address -- certainly
++ * tlb_set_page_with_attrs() does so. The page offset of xlat
++ * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
++ * The page portion of xlat will be logged by memory_region_access_valid()
++ * when this memory access is rejected, so use the original untranslated
++ * physical address.
++ */
++ assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
++ *xlat = orig_addr;
+ return &d->map.sections[PHYS_SECTION_UNASSIGNED];
+ }
+ #endif
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch b/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch
new file mode 100644
index 0000000000..3f0d5fbd5c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2022-4144.patch
@@ -0,0 +1,103 @@
+From 6dbbf055148c6f1b7d8a3251a65bd6f3d1e1f622 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@linaro.org>
+Date: Mon, 28 Nov 2022 21:27:40 +0100
+Subject: [PATCH] hw/display/qxl: Avoid buffer overrun in qxl_phys2virt
+ (CVE-2022-4144)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Have qxl_get_check_slot_offset() return false if the requested
+buffer size does not fit within the slot memory region.
+
+Similarly qxl_phys2virt() now returns NULL in such case, and
+qxl_dirty_one_surface() aborts.
+
+This avoids buffer overrun in the host pointer returned by
+memory_region_get_ram_ptr().
+
+Fixes: CVE-2022-4144 (out-of-bounds read)
+Reported-by: Wenxu Yin (@awxylitol)
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1336
+
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20221128202741.4945-5-philmd@linaro.org>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/6dbbf055148c6f1b7d8a3251a65bd6f3d1e1f622]
+CVE: CVE-2022-4144
+Comments: Deleted the patch hunk in qxl.h, as it only changes
+comments that are not present in the current version of qemu.
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ hw/display/qxl.c | 27 +++++++++++++++++++++++----
+ 1 file changed, 23 insertions(+), 4 deletions(-)
+
+diff --git a/hw/display/qxl.c b/hw/display/qxl.c
+index cd7eb39d..6bc8385b 100644
+--- a/hw/display/qxl.c
++++ b/hw/display/qxl.c
+@@ -1440,11 +1440,13 @@ static void qxl_reset_surfaces(PCIQXLDevice *d)
+
+ /* can be also called from spice server thread context */
+ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+- uint32_t *s, uint64_t *o)
++ uint32_t *s, uint64_t *o,
++ size_t size_requested)
+ {
+ uint64_t phys = le64_to_cpu(pqxl);
+ uint32_t slot = (phys >> (64 - 8)) & 0xff;
+ uint64_t offset = phys & 0xffffffffffff;
++ uint64_t size_available;
+
+ if (slot >= NUM_MEMSLOTS) {
+ qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot,
+@@ -1468,6 +1470,23 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ slot, offset, qxl->guest_slots[slot].size);
+ return false;
+ }
++ size_available = memory_region_size(qxl->guest_slots[slot].mr);
++ if (qxl->guest_slots[slot].offset + offset >= size_available) {
++ qxl_set_guest_bug(qxl,
++ "slot %d offset %"PRIu64" > region size %"PRIu64"\n",
++ slot, qxl->guest_slots[slot].offset + offset,
++ size_available);
++ return false;
++ }
++ size_available -= qxl->guest_slots[slot].offset + offset;
++ if (size_requested > size_available) {
++ qxl_set_guest_bug(qxl,
++ "slot %d offset %"PRIu64" size %zu: "
++ "overrun by %"PRIu64" bytes\n",
++ slot, offset, size_requested,
++ size_requested - size_available);
++ return false;
++ }
+
+ *s = slot;
+ *o = offset;
+@@ -1486,7 +1505,7 @@ void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id)
+ offset = le64_to_cpu(pqxl) & 0xffffffffffff;
+ return (void *)(intptr_t)offset;
+ case MEMSLOT_GROUP_GUEST:
+- if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset)) {
++ if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size)) {
+ return NULL;
+ }
+ ptr = memory_region_get_ram_ptr(qxl->guest_slots[slot].mr);
+@@ -1944,9 +1963,9 @@ static void qxl_dirty_one_surface(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ uint32_t slot;
+ bool rc;
+
+- rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset);
+- assert(rc == true);
+ size = (uint64_t)height * abs(stride);
++ rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size);
++ assert(rc == true);
+ trace_qxl_surfaces_dirty(qxl->id, offset, size);
+ qxl_set_dirty(qxl->guest_slots[slot].mr,
+ qxl->guest_slots[slot].offset + offset,
+--
+2.25.1
+
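A minimal, self-contained C sketch (illustrative only, not QEMU code; the function name and values are invented) of the "offset plus requested size must fit in the region" check that the patch above adds before a guest offset is turned into a host pointer:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Return true only if [offset, offset + size_requested) lies inside the region. */
    static bool request_fits(uint64_t region_size, uint64_t offset,
                             uint64_t size_requested)
    {
        if (offset >= region_size) {
            return false;                       /* offset itself is out of range */
        }
        /* Subtract instead of adding to avoid overflowing offset + size_requested. */
        return size_requested <= region_size - offset;
    }

    int main(void)
    {
        printf("%d\n", request_fits(4096, 4000, 96));  /* 1: exactly fits      */
        printf("%d\n", request_fits(4096, 4000, 97));  /* 0: one byte too many */
        printf("%d\n", request_fits(4096, 5000, 1));   /* 0: offset too large  */
        return 0;
    }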
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch
new file mode 100644
index 0000000000..26e22b4c31
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-0330.patch
@@ -0,0 +1,77 @@
+[Ubuntu note: remove fuzz-lsi53c895a-test.c changes since the file does not
+ exist for this release]
+From b987718bbb1d0eabf95499b976212dd5f0120d75 Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Mon, 22 May 2023 11:10:11 +0200
+Subject: [PATCH] hw/scsi/lsi53c895a: Fix reentrancy issues in the LSI
+ controller (CVE-2023-0330)
+
+We cannot use the generic reentrancy guard in the LSI code, so
+we have to manually prevent endless reentrancy here. The problematic
+lsi_execute_script() function already has a way to detect whether
+too many instructions have been executed - we just have to slightly
+change the logic here so that it also takes into account whether the
+function has been called too often in a reentrant way.
+
+The code in fuzz-lsi53c895a-test.c has been taken from an earlier
+patch by Mauro Matteo Cascella.
+
+Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1563
+Message-Id: <20230522091011.1082574-1-thuth@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Alexander Bulekov <alxndr@bu.edu>
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+
+Reference: https://launchpad.net/ubuntu/+source/qemu/1:4.2-3ubuntu6.27
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/qemu/tree/debian/patches/CVE-2023-0330.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/qemu-project/qemu/-/commit/b987718bbb1d0eabf95499b976212dd5f0120d75]
+CVE: CVE-2023-0330
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/scsi/lsi53c895a.c | 23 +++++++++++++++------
+ tests/qtest/fuzz-lsi53c895a-test.c | 33 ++++++++++++++++++++++++++++++
+ 2 files changed, 50 insertions(+), 6 deletions(-)
+
+--- qemu-4.2.orig/hw/scsi/lsi53c895a.c
++++ qemu-4.2/hw/scsi/lsi53c895a.c
+@@ -1135,15 +1135,24 @@ static void lsi_execute_script(LSIState
+ uint32_t addr, addr_high;
+ int opcode;
+ int insn_processed = 0;
++ static int reentrancy_level;
++
++ reentrancy_level++;
+
+ s->istat1 |= LSI_ISTAT1_SRUN;
+ again:
+- if (++insn_processed > LSI_MAX_INSN) {
+- /* Some windows drivers make the device spin waiting for a memory
+- location to change. If we have been executed a lot of code then
+- assume this is the case and force an unexpected device disconnect.
+- This is apparently sufficient to beat the drivers into submission.
+- */
++ /*
++ * Some windows drivers make the device spin waiting for a memory location
++ * to change. If we have executed more than LSI_MAX_INSN instructions then
++ * assume this is the case and force an unexpected device disconnect. This
++ * is apparently sufficient to beat the drivers into submission.
++ *
++ * Another issue (CVE-2023-0330) can occur if the script is programmed to
++ * trigger itself again and again. Avoid this problem by stopping after
++ * being called multiple times in a reentrant way (8 is an arbitrary value
++ * which should be enough for all valid use cases).
++ */
++ if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) {
+ if (!(s->sien0 & LSI_SIST0_UDC)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "lsi_scsi: inf. loop with UDC masked");
+@@ -1597,6 +1606,8 @@ again:
+ }
+ }
+ trace_lsi_execute_script_stop();
++
++ reentrancy_level--;
+ }
+
+ static uint8_t lsi_reg_readb(LSIState *s, int offset)
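A minimal, self-contained C sketch (illustrative only, not QEMU code; names are invented, and the limit mirrors the arbitrary value of 8 used above) of the depth-limited re-entrancy guard pattern that the patch above applies to lsi_execute_script():

    #include <stdio.h>

    #define MAX_REENTRANCY 8

    static int reentrancy_level;

    static void run_script(void)
    {
        reentrancy_level++;
        if (reentrancy_level > MAX_REENTRANCY) {
            /* Too many nested invocations: give up instead of recursing forever. */
            fprintf(stderr, "re-entrancy limit reached, stopping\n");
            reentrancy_level--;
            return;
        }
        /* Model a script that (maliciously) triggers itself again. */
        run_script();
        reentrancy_level--;
    }

    int main(void)
    {
        run_script();   /* stops after MAX_REENTRANCY nested calls */
        return 0;
    }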
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch
new file mode 100644
index 0000000000..70b7d6c562
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-2861.patch
@@ -0,0 +1,178 @@
+From f6b0de53fb87ddefed348a39284c8e2f28dc4eda Mon Sep 17 00:00:00 2001
+From: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Date: Wed, 7 Jun 2023 18:29:33 +0200
+Subject: [PATCH] 9pfs: prevent opening special files (CVE-2023-2861)
+
+The 9p protocol does not specifically define how a server shall behave when
+a client tries to open a special file; however, from a security POV it does
+make sense for a 9p server to prohibit opening any special file on the host
+side in general. A sane Linux 9p client, for instance, would never attempt to
+open a special file on the host side; it would always handle those exclusively
+on its guest side. A malicious client, however, could potentially escape
+from the exported 9p tree by creating and opening a device file on the host
+side.
+
+With QEMU this could only be exploited in the following unsafe setups:
+
+ - Running QEMU binary as root AND 9p 'local' fs driver AND 'passthrough'
+ security model.
+
+or
+
+ - Using 9p 'proxy' fs driver (which is running its helper daemon as
+ root).
+
+These setups were already discouraged for safety reasons before,
+however for obvious reasons we are now tightening behaviour on this.
+
+Fixes: CVE-2023-2861
+Reported-by: Yanwu Shen <ywsPlz@gmail.com>
+Reported-by: Jietao Xiao <shawtao1125@gmail.com>
+Reported-by: Jinku Li <jkli@xidian.edu.cn>
+Reported-by: Wenbo Shen <shenwenbo@zju.edu.cn>
+Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com>
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Reviewed-by: Michael Tokarev <mjt@tls.msk.ru>
+Message-Id: <E1q6w7r-0000Q0-NM@lizzy.crudebyte.com>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/f6b0de53fb87ddefed348a39284c8e2f28dc4eda]
+CVE: CVE-2023-2861
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ fsdev/virtfs-proxy-helper.c | 27 +++++++++++++++++++++++--
+ hw/9pfs/9p-util.h | 40 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 65 insertions(+), 2 deletions(-)
+
+diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
+index 6f132c5f..300c9765 100644
+--- a/fsdev/virtfs-proxy-helper.c
++++ b/fsdev/virtfs-proxy-helper.c
+@@ -26,6 +26,7 @@
+ #include "qemu/xattr.h"
+ #include "9p-iov-marshal.h"
+ #include "hw/9pfs/9p-proxy.h"
++#include "hw/9pfs/9p-util.h"
+ #include "fsdev/9p-iov-marshal.h"
+
+ #define PROGNAME "virtfs-proxy-helper"
+@@ -350,6 +351,28 @@ static void resetugid(int suid, int sgid)
+ }
+ }
+
++/*
++ * Open regular file or directory. Attempts to open any special file are
++ * rejected.
++ *
++ * returns file descriptor or -1 on error
++ */
++static int open_regular(const char *pathname, int flags, mode_t mode)
++{
++ int fd;
++
++ fd = open(pathname, flags, mode);
++ if (fd < 0) {
++ return fd;
++ }
++
++ if (close_if_special_file(fd) < 0) {
++ return -1;
++ }
++
++ return fd;
++}
++
+ /*
+ * send response in two parts
+ * 1) ProxyHeader
+@@ -694,7 +717,7 @@ static int do_create(struct iovec *iovec)
+ if (ret < 0) {
+ goto unmarshal_err_out;
+ }
+- ret = open(path.data, flags, mode);
++ ret = open_regular(path.data, flags, mode);
+ if (ret < 0) {
+ ret = -errno;
+ }
+@@ -719,7 +742,7 @@ static int do_open(struct iovec *iovec)
+ if (ret < 0) {
+ goto err_out;
+ }
+- ret = open(path.data, flags);
++ ret = open_regular(path.data, flags, 0);
+ if (ret < 0) {
+ ret = -errno;
+ }
+diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h
+index 546f46dc..79fdd2a3 100644
+--- a/hw/9pfs/9p-util.h
++++ b/hw/9pfs/9p-util.h
+@@ -13,12 +13,16 @@
+ #ifndef QEMU_9P_UTIL_H
+ #define QEMU_9P_UTIL_H
+
++#include "qemu/error-report.h"
++
+ #ifdef O_PATH
+ #define O_PATH_9P_UTIL O_PATH
+ #else
+ #define O_PATH_9P_UTIL 0
+ #endif
+
++#define qemu_fstat fstat
++
+ static inline void close_preserve_errno(int fd)
+ {
+ int serrno = errno;
+@@ -26,6 +30,38 @@ static inline void close_preserve_errno(int fd)
+ errno = serrno;
+ }
+
++/**
++ * close_if_special_file() - Close @fd if neither regular file nor directory.
++ *
++ * @fd: file descriptor of open file
++ * Return: 0 on regular file or directory, -1 otherwise
++ *
++ * CVE-2023-2861: Prohibit opening any special file directly on host
++ * (especially device files), as a compromised client could potentially gain
++ * access outside exported tree under certain, unsafe setups. We expect
++ * client to handle I/O on special files exclusively on guest side.
++ */
++static inline int close_if_special_file(int fd)
++{
++ struct stat stbuf;
++
++ if (qemu_fstat(fd, &stbuf) < 0) {
++ close_preserve_errno(fd);
++ return -1;
++ }
++ if (!S_ISREG(stbuf.st_mode) && !S_ISDIR(stbuf.st_mode)) {
++ error_report_once(
++ "9p: broken or compromised client detected; attempt to open "
++ "special file (i.e. neither regular file, nor directory)"
++ );
++ close(fd);
++ errno = ENXIO;
++ return -1;
++ }
++
++ return 0;
++}
++
+ static inline int openat_dir(int dirfd, const char *name)
+ {
+ return openat(dirfd, name,
+@@ -56,6 +92,10 @@ again:
+ return -1;
+ }
+
++ if (close_if_special_file(fd) < 0) {
++ return -1;
++ }
++
+ serrno = errno;
+ /* O_NONBLOCK was only needed to open the file. Let's drop it. We don't
+ * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat()
+--
+2.25.1
+
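A minimal, self-contained C sketch (illustrative only, not QEMU code; the helper name is invented) of the fstat()-based check behind close_if_special_file() above, which refuses any descriptor that is neither a regular file nor a directory:

    #include <stdio.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/stat.h>

    /* Open a path but refuse device nodes, FIFOs, sockets, etc. */
    static int open_regular_only(const char *path, int flags)
    {
        struct stat st;
        int fd = open(path, flags);

        if (fd < 0) {
            return -1;
        }
        if (fstat(fd, &st) < 0) {
            int serrno = errno;
            close(fd);
            errno = serrno;      /* preserve the fstat() error for the caller */
            return -1;
        }
        if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode)) {
            close(fd);
            errno = ENXIO;       /* same errno the QEMU fix above uses */
            return -1;
        }
        return fd;
    }

    int main(void)
    {
        /* /dev/null is a character device, so this is expected to fail. */
        int fd = open_regular_only("/dev/null", O_RDONLY);
        printf("open_regular_only(\"/dev/null\") = %d\n", fd);
        if (fd >= 0) {
            close(fd);
        }
        return 0;
    }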
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch
new file mode 100644
index 0000000000..7144bdca46
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-3180.patch
@@ -0,0 +1,49 @@
+From 9d38a8434721a6479fe03fb5afb150ca793d3980 Mon Sep 17 00:00:00 2001
+From: zhenwei pi <pizhenwei@bytedance.com>
+Date: Thu, 3 Aug 2023 10:43:13 +0800
+Subject: [PATCH] virtio-crypto: verify src&dst buffer length for sym request
+
+For symmetric algorithms, the length of the ciphertext must be the same
+as that of the plaintext.
+The missing verification of src_len and dst_len in
+virtio_crypto_sym_op_helper() may lead to a buffer overflow or data disclosure.
+
+This patch was originally written by Yiming Tao for QEMU-SECURITY and is
+resent here (with a few changes to the error message) on qemu-devel.
+
+Fixes: CVE-2023-3180
+Fixes: 04b9b37edda("virtio-crypto: add data queue processing handler")
+Cc: Gonglei <arei.gonglei@huawei.com>
+Cc: Mauro Matteo Cascella <mcascell@redhat.com>
+Cc: Yiming Tao <taoym@zju.edu.cn>
+Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
+Message-Id: <20230803024314.29962-2-pizhenwei@bytedance.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+
+Upstream-Status: Backport from [https://gitlab.com/qemu-project/qemu/-/commit/9d38a8434721a6479fe03fb5afb150ca793d3980]
+CVE: CVE-2023-3180
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ hw/virtio/virtio-crypto.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
+index 44faf5a522b..13aec771e11 100644
+--- a/hw/virtio/virtio-crypto.c
++++ b/hw/virtio/virtio-crypto.c
+@@ -634,6 +634,11 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
+ return NULL;
+ }
+
++ if (unlikely(src_len != dst_len)) {
++ virtio_error(vdev, "sym request src len is different from dst len");
++ return NULL;
++ }
++
+ max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
+ if (unlikely(max_len > vcrypto->conf.max_size)) {
+ virtio_error(vdev, "virtio-crypto too big length");
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch
new file mode 100644
index 0000000000..2942e84cac
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-3354.patch
@@ -0,0 +1,87 @@
+From 10be627d2b5ec2d6b3dce045144aa739eef678b4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Daniel=20P=2E=20Berrang=C3=A9?= <berrange@redhat.com>
+Date: Tue, 20 Jun 2023 09:45:34 +0100
+Subject: [PATCH] io: remove io watch if TLS channel is closed during handshake
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The TLS handshake may take some time to complete, during which time an
+I/O watch might be registered with the main loop. If the owner of the
+I/O channel invokes qio_channel_close() while the handshake is waiting
+to continue the I/O watch must be removed. Failing to remove it will
+later trigger the completion callback which the owner is not expecting
+to receive. In the case of the VNC server, this results in a SEGV as
+vnc_disconnect_start() tries to shutdown a client connection that is
+already gone / NULL.
+
+CVE-2023-3354
+Reported-by: jiangyegen <jiangyegen@huawei.com>
+Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/10be627d2b5ec2d6b3dce045144aa739eef678b4]
+CVE: CVE-2023-3354
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ include/io/channel-tls.h | 1 +
+ io/channel-tls.c | 18 ++++++++++++------
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
+index fdbdf12f..e49e2831 100644
+--- a/include/io/channel-tls.h
++++ b/include/io/channel-tls.h
+@@ -49,6 +49,7 @@ struct QIOChannelTLS {
+ QIOChannel *master;
+ QCryptoTLSSession *session;
+ QIOChannelShutdown shutdown;
++ guint hs_ioc_tag;
+ };
+
+ /**
+diff --git a/io/channel-tls.c b/io/channel-tls.c
+index 7ec8ceff..8b32fbde 100644
+--- a/io/channel-tls.c
++++ b/io/channel-tls.c
+@@ -194,12 +194,13 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc,
+ }
+
+ trace_qio_channel_tls_handshake_pending(ioc, status);
+- qio_channel_add_watch_full(ioc->master,
+- condition,
+- qio_channel_tls_handshake_io,
+- data,
+- NULL,
+- context);
++ ioc->hs_ioc_tag =
++ qio_channel_add_watch_full(ioc->master,
++ condition,
++ qio_channel_tls_handshake_io,
++ data,
++ NULL,
++ context);
+ }
+ }
+
+@@ -214,6 +215,7 @@ static gboolean qio_channel_tls_handshake_io(QIOChannel *ioc,
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(
+ qio_task_get_source(task));
+
++ tioc->hs_ioc_tag = 0;
+ g_free(data);
+ qio_channel_tls_handshake_task(tioc, task, context);
+
+@@ -371,6 +373,10 @@ static int qio_channel_tls_close(QIOChannel *ioc,
+ {
+ QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc);
+
++ if (tioc->hs_ioc_tag) {
++ g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove);
++ }
++
+ return qio_channel_close(tioc->master, errp);
+ }
+
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch b/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch
new file mode 100644
index 0000000000..db02210fa4
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/CVE-2023-5088.patch
@@ -0,0 +1,114 @@
+From 7d7512019fc40c577e2bdd61f114f31a9eb84a8e Mon Sep 17 00:00:00 2001
+From: Fiona Ebner <f.ebner@proxmox.com>
+Date: Wed, 6 Sep 2023 15:09:21 +0200
+Subject: [PATCH] hw/ide: reset: cancel async DMA operation before resetting
+ state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+If there is a pending DMA operation during ide_bus_reset(), the fact
+that the IDEState is already reset before the operation is canceled
+can be problematic. In particular, ide_dma_cb() might be called and
+then use the reset IDEState which contains the signature after the
+reset. When used to construct the IO operation this leads to
+ide_get_sector() returning 0 and nsector being 1. This is particularly
+bad, because a write command will thus destroy the first sector which
+often contains a partition table or similar.
+
+Traces showing the unsolicited write happening with IDEState
+0x5595af6949d0 being used after reset:
+
+> ahci_port_write ahci(0x5595af6923f0)[0]: port write [reg:PxSCTL] @ 0x2c: 0x00000300
+> ahci_reset_port ahci(0x5595af6923f0)[0]: reset port
+> ide_reset IDEstate 0x5595af6949d0
+> ide_reset IDEstate 0x5595af694da8
+> ide_bus_reset_aio aio_cancel
+> dma_aio_cancel dbs=0x7f64600089a0
+> dma_blk_cb dbs=0x7f64600089a0 ret=0
+> dma_complete dbs=0x7f64600089a0 ret=0 cb=0x5595acd40b30
+> ahci_populate_sglist ahci(0x5595af6923f0)[0]
+> ahci_dma_prepare_buf ahci(0x5595af6923f0)[0]: prepare buf limit=512 prepared=512
+> ide_dma_cb IDEState 0x5595af6949d0; sector_num=0 n=1 cmd=DMA WRITE
+> dma_blk_io dbs=0x7f6420802010 bs=0x5595ae2c6c30 offset=0 to_dev=1
+> dma_blk_cb dbs=0x7f6420802010 ret=0
+
+> (gdb) p *qiov
+> $11 = {iov = 0x7f647c76d840, niov = 1, {{nalloc = 1, local_iov = {iov_base = 0x0,
+> iov_len = 512}}, {__pad = "\001\000\000\000\000\000\000\000\000\000\000",
+> size = 512}}}
+> (gdb) bt
+> #0 blk_aio_pwritev (blk=0x5595ae2c6c30, offset=0, qiov=0x7f6420802070, flags=0,
+> cb=0x5595ace6f0b0 <dma_blk_cb>, opaque=0x7f6420802010)
+> at ../block/block-backend.c:1682
+> #1 0x00005595ace6f185 in dma_blk_cb (opaque=0x7f6420802010, ret=<optimized out>)
+> at ../softmmu/dma-helpers.c:179
+> #2 0x00005595ace6f778 in dma_blk_io (ctx=0x5595ae0609f0,
+> sg=sg@entry=0x5595af694d00, offset=offset@entry=0, align=align@entry=512,
+> io_func=io_func@entry=0x5595ace6ee30 <dma_blk_write_io_func>,
+> io_func_opaque=io_func_opaque@entry=0x5595ae2c6c30,
+> cb=0x5595acd40b30 <ide_dma_cb>, opaque=0x5595af6949d0,
+> dir=DMA_DIRECTION_TO_DEVICE) at ../softmmu/dma-helpers.c:244
+> #3 0x00005595ace6f90a in dma_blk_write (blk=0x5595ae2c6c30,
+> sg=sg@entry=0x5595af694d00, offset=offset@entry=0, align=align@entry=512,
+> cb=cb@entry=0x5595acd40b30 <ide_dma_cb>, opaque=opaque@entry=0x5595af6949d0)
+> at ../softmmu/dma-helpers.c:280
+> #4 0x00005595acd40e18 in ide_dma_cb (opaque=0x5595af6949d0, ret=<optimized out>)
+> at ../hw/ide/core.c:953
+> #5 0x00005595ace6f319 in dma_complete (ret=0, dbs=0x7f64600089a0)
+> at ../softmmu/dma-helpers.c:107
+> #6 dma_blk_cb (opaque=0x7f64600089a0, ret=0) at ../softmmu/dma-helpers.c:127
+> #7 0x00005595ad12227d in blk_aio_complete (acb=0x7f6460005b10)
+> at ../block/block-backend.c:1527
+> #8 blk_aio_complete (acb=0x7f6460005b10) at ../block/block-backend.c:1524
+> #9 blk_aio_write_entry (opaque=0x7f6460005b10) at ../block/block-backend.c:1594
+> #10 0x00005595ad258cfb in coroutine_trampoline (i0=<optimized out>,
+> i1=<optimized out>) at ../util/coroutine-ucontext.c:177
+
+Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Tested-by: simon.rowe@nutanix.com
+Message-ID: <20230906130922.142845-1-f.ebner@proxmox.com>
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+
+Upstream-Status: Backport [https://gitlab.com/qemu-project/qemu/-/commit/7d7512019fc40c577e2bdd61f114f31a9eb84a8e]
+CVE: CVE-2023-5088
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/ide/core.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/hw/ide/core.c b/hw/ide/core.c
+index b5e0dcd29b2..63ba665f3d2 100644
+--- a/hw/ide/core.c
++++ b/hw/ide/core.c
+@@ -2515,19 +2515,19 @@ static void ide_dummy_transfer_stop(IDEState *s)
+
+ void ide_bus_reset(IDEBus *bus)
+ {
+- bus->unit = 0;
+- bus->cmd = 0;
+- ide_reset(&bus->ifs[0]);
+- ide_reset(&bus->ifs[1]);
+- ide_clear_hob(bus);
+-
+- /* pending async DMA */
++ /* pending async DMA - needs the IDEState before it is reset */
+ if (bus->dma->aiocb) {
+ trace_ide_bus_reset_aio();
+ blk_aio_cancel(bus->dma->aiocb);
+ bus->dma->aiocb = NULL;
+ }
+
++ bus->unit = 0;
++ bus->cmd = 0;
++ ide_reset(&bus->ifs[0]);
++ ide_reset(&bus->ifs[1]);
++ ide_clear_hob(bus);
++
+ /* reset dma provider too */
+ if (bus->dma->ops->reset) {
+ bus->dma->ops->reset(bus->dma);
+--
+GitLab
+
diff --git a/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch
new file mode 100644
index 0000000000..0fdae8351a
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-handle-dma-errors.patch
@@ -0,0 +1,146 @@
+From ea2a7c7676d8eb9d1458eaa4b717df46782dcb3a Mon Sep 17 00:00:00 2001
+From: Gaurav Gupta <gauragup@cisco.com>
+Date: Wed, 29 Mar 2023 14:07:17 -0700
+Subject: [PATCH 2/2] hw/block/nvme: handle dma errors
+
+Handling DMA errors gracefully is required for the device to pass the
+block/011 test ("disable PCI device while doing I/O") in the blktests
+suite.
+
+With this patch the device sets the Controller Fatal Status bit in the
+CSTS register when failing to read from a submission queue or writing to
+a completion queue; expecting the host to reset the controller.
+
+If DMA errors occur at any other point in the execution of the command
+(say, while mapping the PRPs), the command is aborted with a Data
+Transfer Error status code.
+
+Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
+Signed-off-by: Gaurav Gupta <gauragup@cisco.com>
+---
+ hw/block/nvme.c | 41 +++++++++++++++++++++++++++++++----------
+ hw/block/trace-events | 3 +++
+ 2 files changed, 34 insertions(+), 10 deletions(-)
+
+diff --git a/hw/block/nvme.c b/hw/block/nvme.c
+index e6f24a6..bda446d 100644
+--- a/hw/block/nvme.c
++++ b/hw/block/nvme.c
+@@ -60,14 +60,14 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
+ return addr >= low && addr < hi;
+ }
+
+-static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
++static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+ {
+ if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
+ memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+- return;
++ return 0;
+ }
+
+- pci_dma_read(&n->parent_obj, addr, buf, size);
++ return pci_dma_read(&n->parent_obj, addr, buf, size);
+ }
+
+ static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
+@@ -152,6 +152,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
+ hwaddr trans_len = n->page_size - (prp1 % n->page_size);
+ trans_len = MIN(len, trans_len);
+ int num_prps = (len >> n->page_bits) + 1;
++ int ret;
+
+ if (unlikely(!prp1)) {
+ trace_nvme_err_invalid_prp();
+@@ -178,7 +179,11 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
+
+ nents = (len + n->page_size - 1) >> n->page_bits;
+ prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
+- nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
++ ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
++ if (ret) {
++ trace_pci_nvme_err_addr_read(prp2);
++ return NVME_DATA_TRAS_ERROR;
++ }
+ while (len != 0) {
+ uint64_t prp_ent = le64_to_cpu(prp_list[i]);
+
+@@ -191,8 +196,12 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
+ i = 0;
+ nents = (len + n->page_size - 1) >> n->page_bits;
+ prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
+- nvme_addr_read(n, prp_ent, (void *)prp_list,
+- prp_trans);
++ ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
++ prp_trans);
++ if (ret) {
++ trace_pci_nvme_err_addr_read(prp_ent);
++ return NVME_DATA_TRAS_ERROR;
++ }
+ prp_ent = le64_to_cpu(prp_list[i]);
+ }
+
+@@ -286,6 +295,7 @@ static void nvme_post_cqes(void *opaque)
+ NvmeCQueue *cq = opaque;
+ NvmeCtrl *n = cq->ctrl;
+ NvmeRequest *req, *next;
++ int ret;
+
+ QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
+ NvmeSQueue *sq;
+@@ -295,15 +305,21 @@ static void nvme_post_cqes(void *opaque)
+ break;
+ }
+
+- QTAILQ_REMOVE(&cq->req_list, req, entry);
+ sq = req->sq;
+ req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
+ req->cqe.sq_id = cpu_to_le16(sq->sqid);
+ req->cqe.sq_head = cpu_to_le16(sq->head);
+ addr = cq->dma_addr + cq->tail * n->cqe_size;
++ ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
++ sizeof(req->cqe));
++ if (ret) {
++ trace_pci_nvme_err_addr_write(addr);
++ trace_pci_nvme_err_cfs();
++ n->bar.csts = NVME_CSTS_FAILED;
++ break;
++ }
++ QTAILQ_REMOVE(&cq->req_list, req, entry);
+ nvme_inc_cq_tail(cq);
+- pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
+- sizeof(req->cqe));
+ QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
+ }
+ if (cq->tail != cq->head) {
+@@ -888,7 +904,12 @@ static void nvme_process_sq(void *opaque)
+
+ while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
+ addr = sq->dma_addr + sq->head * n->sqe_size;
+- nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
++ if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
++ trace_pci_nvme_err_addr_read(addr);
++ trace_pci_nvme_err_cfs();
++ n->bar.csts = NVME_CSTS_FAILED;
++ break;
++ }
+ nvme_inc_sq_head(sq);
+
+ req = QTAILQ_FIRST(&sq->req_list);
+diff --git a/hw/block/trace-events b/hw/block/trace-events
+index c03e80c..4e4ad4e 100644
+--- a/hw/block/trace-events
++++ b/hw/block/trace-events
+@@ -60,6 +60,9 @@ nvme_mmio_shutdown_set(void) "shutdown bit set"
+ nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
+
+ # nvme traces for error conditions
++pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64""
++pci_nvme_err_addr_write(uint64_t addr) "addr 0x%"PRIx64""
++pci_nvme_err_cfs(void) "controller fatal status"
+ nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
+ nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
+ nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch
new file mode 100644
index 0000000000..66ada52efb
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/hw-block-nvme-refactor-nvme_addr_read.patch
@@ -0,0 +1,55 @@
+From 55428706d5b0b8889b8e009eac77137bb556a4f0 Mon Sep 17 00:00:00 2001
+From: Klaus Jensen <k.jensen@samsung.com>
+Date: Tue, 9 Jun 2020 21:03:17 +0200
+Subject: [PATCH 1/2] hw/block/nvme: refactor nvme_addr_read
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Pull the controller memory buffer check to its own function. The check
+will be used on its own in later patches.
+
+Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
+Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Message-Id: <20200609190333.59390-7-its@irrelevant.dk>
+Signed-off-by: Kevin Wolf <kwolf@redhat.com>
+---
+ hw/block/nvme.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/hw/block/nvme.c b/hw/block/nvme.c
+index 12d8254..e6f24a6 100644
+--- a/hw/block/nvme.c
++++ b/hw/block/nvme.c
+@@ -52,14 +52,22 @@
+
+ static void nvme_process_sq(void *opaque);
+
++static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
++{
++ hwaddr low = n->ctrl_mem.addr;
++ hwaddr hi = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);
++
++ return addr >= low && addr < hi;
++}
++
+ static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+ {
+- if (n->cmbsz && addr >= n->ctrl_mem.addr &&
+- addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
++ if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
+ memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
+- } else {
+- pci_dma_read(&n->parent_obj, addr, buf, size);
++ return;
+ }
++
++ pci_dma_read(&n->parent_obj, addr, buf, size);
+ }
+
+ static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
+--
+1.8.3.1
+
diff --git a/meta/recipes-devtools/qemu/qemu/hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch b/meta/recipes-devtools/qemu/qemu/hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch
new file mode 100644
index 0000000000..f380be486c
--- /dev/null
+++ b/meta/recipes-devtools/qemu/qemu/hw-display-qxl-Pass-requested-buffer-size-to-qxl_phy.patch
@@ -0,0 +1,236 @@
+From 5a44a01c9eca6507be45d107c27377a3e8d0ee8c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= <philmd@linaro.org>
+Date: Mon, 28 Nov 2022 21:27:39 +0100
+Subject: [PATCH] hw/display/qxl: Pass requested buffer size to qxl_phys2virt()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Currently qxl_phys2virt() doesn't check for buffer overrun.
+In order to do so in the next commit, pass the buffer size
+as argument.
+
+For QXLCursor in qxl_render_cursor() -> qxl_cursor() we
+verify the size of the chunked data ahead, checking we can
+access 'sizeof(QXLCursor) + chunk->data_size' bytes.
+Since in the SPICE_CURSOR_TYPE_MONO case the cursor is
+assumed to fit in one chunk, no change are required.
+In SPICE_CURSOR_TYPE_ALPHA the ahead read is handled in
+qxl_unpack_chunks().
+
+Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Acked-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Message-Id: <20221128202741.4945-4-philmd@linaro.org>
+
+Backport and rebase the patch to fix a compile error introduced by CVE-2022-4144.patch:
+
+/qxl.c: In function 'qxl_phys2virt':
+| /home/hitendra/work/yocto-work/cgx-data/dunfell-3.1/x86-generic-64-5.4-3.1-cgx/project/tmp/work/i586-montavistamllib32-linux/lib32-qemu/4.2.0-r0.8/qemu-4.2.0/hw/display/qxl.c:1508:67: error: 'size' undeclared (first use in this function); did you mean 'gsize'?
+| 1508 | if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size)) {
+| | ^~~~
+| | gsize
+
+Upstream-Status: Backport [https://github.com/qemu/qemu/commit/61c34fc && https://gitlab.com/qemu-project/qemu/-/commit/8efec0ef8bbc1e75a7ebf6e325a35806ece9b39f]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ hw/display/qxl-logger.c | 22 +++++++++++++++++++---
+ hw/display/qxl-render.c | 20 ++++++++++++++++----
+ hw/display/qxl.c | 17 +++++++++++------
+ hw/display/qxl.h | 3 ++-
+ 4 files changed, 48 insertions(+), 14 deletions(-)
+
+diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c
+index 2ec6d8fa..031ddfec 100644
+--- a/hw/display/qxl-logger.c
++++ b/hw/display/qxl-logger.c
+@@ -106,7 +106,7 @@ static int qxl_log_image(PCIQXLDevice *qxl, QXLPHYSICAL addr, int group_id)
+ QXLImage *image;
+ QXLImageDescriptor *desc;
+
+- image = qxl_phys2virt(qxl, addr, group_id);
++ image = qxl_phys2virt(qxl, addr, group_id, sizeof(QXLImage));
+ if (!image) {
+ return 1;
+ }
+@@ -216,7 +216,8 @@ int qxl_log_cmd_cursor(PCIQXLDevice *qxl, QXLCursorCmd *cmd, int group_id)
+ cmd->u.set.position.y,
+ cmd->u.set.visible ? "yes" : "no",
+ cmd->u.set.shape);
+- cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id);
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id,
++ sizeof(QXLCursor));
+ if (!cursor) {
+ return 1;
+ }
+@@ -238,6 +239,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ {
+ bool compat = ext->flags & QXL_COMMAND_FLAG_COMPAT;
+ void *data;
++ size_t datasz;
+ int ret;
+
+ if (!qxl->cmdlog) {
+@@ -249,7 +251,20 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ qxl_name(qxl_type, ext->cmd.type),
+ compat ? "(compat)" : "");
+
+- data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ switch (ext->cmd.type) {
++ case QXL_CMD_DRAW:
++ datasz = compat ? sizeof(QXLCompatDrawable) : sizeof(QXLDrawable);
++ break;
++ case QXL_CMD_SURFACE:
++ datasz = sizeof(QXLSurfaceCmd);
++ break;
++ case QXL_CMD_CURSOR:
++ datasz = sizeof(QXLCursorCmd);
++ break;
++ default:
++ goto out;
++ }
++ data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, datasz);
+ if (!data) {
+ return 1;
+ }
+@@ -271,6 +286,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext)
+ qxl_log_cmd_cursor(qxl, data, ext->group_id);
+ break;
+ }
++out:
+ fprintf(stderr, "\n");
+ return 0;
+ }
+diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c
+index d532e157..a65a6d64 100644
+--- a/hw/display/qxl-render.c
++++ b/hw/display/qxl-render.c
+@@ -107,7 +107,9 @@ static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl)
+ qxl->guest_primary.resized = 0;
+ qxl->guest_primary.data = qxl_phys2virt(qxl,
+ qxl->guest_primary.surface.mem,
+- MEMSLOT_GROUP_GUEST);
++ MEMSLOT_GROUP_GUEST,
++ qxl->guest_primary.abs_stride
++ * height);
+ if (!qxl->guest_primary.data) {
+ return;
+ }
+@@ -222,7 +224,8 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl,
+ if (offset == size) {
+ return;
+ }
+- chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id);
++ chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id,
++ sizeof(QXLDataChunk) + chunk->data_size);
+ if (!chunk) {
+ return;
+ }
+@@ -289,7 +292,8 @@ fail:
+ /* called from spice server thread context only */
+ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
+ {
+- QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLCursorCmd));
+ QXLCursor *cursor;
+ QEMUCursor *c;
+
+@@ -308,7 +312,15 @@ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
+ }
+ switch (cmd->type) {
+ case QXL_CURSOR_SET:
+- cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
++ /* First read the QXLCursor to get QXLDataChunk::data_size ... */
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id,
++ sizeof(QXLCursor));
++ if (!cursor) {
++ return 1;
++ }
++ /* Then read including the chunked data following QXLCursor. */
++ cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id,
++ sizeof(QXLCursor) + cursor->chunk.data_size);
+ if (!cursor) {
+ return 1;
+ }
+diff --git a/hw/display/qxl.c b/hw/display/qxl.c
+index 6bc8385b..858d3e93 100644
+--- a/hw/display/qxl.c
++++ b/hw/display/qxl.c
+@@ -275,7 +275,8 @@ static void qxl_spice_monitors_config_async(PCIQXLDevice *qxl, int replay)
+ QXL_IO_MONITORS_CONFIG_ASYNC));
+ }
+
+- cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST);
++ cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST,
++ sizeof(QXLMonitorsConfig));
+ if (cfg != NULL && cfg->count == 1) {
+ qxl->guest_primary.resized = 1;
+ qxl->guest_head0_width = cfg->heads[0].width;
+@@ -460,7 +461,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
+ switch (le32_to_cpu(ext->cmd.type)) {
+ case QXL_CMD_SURFACE:
+ {
+- QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLSurfaceCmd));
+
+ if (!cmd) {
+ return 1;
+@@ -494,7 +496,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
+ }
+ case QXL_CMD_CURSOR:
+ {
+- QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLCursorCmd));
+
+ if (!cmd) {
+ return 1;
+@@ -674,7 +677,8 @@ static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext)
+ *
+ * https://cgit.freedesktop.org/spice/win32/qxl-wddm-dod/commit/?id=f6e099db39e7d0787f294d5fd0dce328b5210faa
+ */
+- void *msg = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
++ void *msg = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id,
++ sizeof(QXLCommandRing));
+ if (msg != NULL && (
+ msg < (void *)qxl->vga.vram_ptr ||
+ msg > ((void *)qxl->vga.vram_ptr + qxl->vga.vram_size))) {
+@@ -1494,7 +1498,8 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl,
+ }
+
+ /* can be also called from spice server thread context */
+-void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id)
++void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id,
++ size_t size)
+ {
+ uint64_t offset;
+ uint32_t slot;
+@@ -1994,7 +1999,7 @@ static void qxl_dirty_surfaces(PCIQXLDevice *qxl)
+ }
+
+ cmd = qxl_phys2virt(qxl, qxl->guest_surfaces.cmds[i],
+- MEMSLOT_GROUP_GUEST);
++ MEMSLOT_GROUP_GUEST, sizeof(QXLSurfaceCmd));
+ assert(cmd);
+ assert(cmd->type == QXL_SURFACE_CMD_CREATE);
+ qxl_dirty_one_surface(qxl, cmd->u.surface_create.data,
+diff --git a/hw/display/qxl.h b/hw/display/qxl.h
+index 80eb0d26..fcfd133a 100644
+--- a/hw/display/qxl.h
++++ b/hw/display/qxl.h
+@@ -147,7 +147,8 @@ typedef struct PCIQXLDevice {
+ #define QXL_DEFAULT_REVISION QXL_REVISION_STABLE_V12
+
+ /* qxl.c */
+-void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id);
++void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id,
++ size_t size);
+ void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
+ GCC_FMT_ATTR(2, 3);
+
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/qemu/qemu_4.2.0.bb b/meta/recipes-devtools/qemu/qemu_4.2.0.bb
index f9905e2812..05449afe4e 100644
--- a/meta/recipes-devtools/qemu/qemu_4.2.0.bb
+++ b/meta/recipes-devtools/qemu/qemu_4.2.0.bb
@@ -24,8 +24,8 @@ do_install_append_class-nativesdk() {
}
PACKAGECONFIG ??= " \
- fdt sdl kvm \
+ fdt sdl kvm slirp \
${@bb.utils.filter('DISTRO_FEATURES', 'alsa xen', d)} \
${@bb.utils.filter('DISTRO_FEATURES', 'seccomp', d)} \
"
-PACKAGECONFIG_class-nativesdk ??= "fdt sdl kvm"
+PACKAGECONFIG:class-nativesdk ??= "fdt sdl kvm slirp"
diff --git a/meta/recipes-devtools/quilt/quilt.inc b/meta/recipes-devtools/quilt/quilt.inc
index d7ecda7aaa..ad23b8d922 100644
--- a/meta/recipes-devtools/quilt/quilt.inc
+++ b/meta/recipes-devtools/quilt/quilt.inc
@@ -12,6 +12,7 @@ SRC_URI = "${SAVANNAH_GNU_MIRROR}/quilt/quilt-${PV}.tar.gz \
file://Makefile \
file://test.sh \
file://0001-tests-Allow-different-output-from-mv.patch \
+ file://faildiff-order.patch \
"
SRC_URI_append_class-target = " file://gnu_patch_test_fix_target.patch"
diff --git a/meta/recipes-devtools/quilt/quilt/faildiff-order.patch b/meta/recipes-devtools/quilt/quilt/faildiff-order.patch
new file mode 100644
index 0000000000..f22065a250
--- /dev/null
+++ b/meta/recipes-devtools/quilt/quilt/faildiff-order.patch
@@ -0,0 +1,41 @@
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+From 4dfe7f9e702c85243a71e4de267a13e434b6d6c2 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <jdelvare@suse.de>
+Date: Fri, 20 Jan 2023 12:56:08 +0100
+Subject: [PATCH] test: Fix a race condition
+
+The test suite does not differentiate between stdout and stderr. When
+messages are printed to both, the order in which they will reach us
+is apparently not guaranteed. Ideally this would be deterministic, but
+until then, explicitly test stdout and stderr separately in the test
+case itself. Otherwise the test suite fails randomly, which is a pain
+for distribution package maintainers.
+
+This fixes bug #63651 reported by Ross Burton:
+https://savannah.nongnu.org/bugs/index.php?63651
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+---
+ test/faildiff.test | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/test/faildiff.test b/test/faildiff.test
+index 5afb8e3..0444c15 100644
+--- a/test/faildiff.test
++++ b/test/faildiff.test
+@@ -27,8 +27,9 @@ What happens on binary files?
+ > File test.bin added to patch %{P}test.diff
+
+ $ printf "\\003\\000\\001" > test.bin
+- $ quilt diff -pab --no-index
++ $ quilt diff -pab --no-index 2>/dev/null
+ >~ (Files|Binary files) a/test\.bin and b/test\.bin differ
++ $ quilt diff -pab --no-index >/dev/null
+ > Diff failed on file 'test.bin', aborting
+ $ echo %{?}
+ > 1
+--
+2.34.1
+
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3521-01.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3521-01.patch
new file mode 100644
index 0000000000..0882d6f310
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3521-01.patch
@@ -0,0 +1,60 @@
+From b5e8bc74b2b05aa557f663fe227b94d2bc64fbd8 Mon Sep 17 00:00:00 2001
+From: Panu Matilainen <pmatilai@redhat.com>
+Date: Thu, 30 Sep 2021 09:51:10 +0300
+Subject: [PATCH] Process MPI's from all kinds of signatures
+
+No immediate effect but needed by the following commits.
+
+Dependent patch:
+CVE: CVE-2021-3521
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/b5e8bc74b2b05aa557f663fe227b94d2bc64fbd8]
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+---
+ rpmio/rpmpgp.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
+index ee5c81e246..340de5fc9a 100644
+--- a/rpmio/rpmpgp.c
++++ b/rpmio/rpmpgp.c
+@@ -511,7 +511,7 @@ pgpDigAlg pgpDigAlgFree(pgpDigAlg alg)
+ return NULL;
+ }
+
+-static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo, uint8_t sigtype,
++static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo,
+ const uint8_t *p, const uint8_t *h, size_t hlen,
+ pgpDigParams sigp)
+ {
+@@ -524,10 +524,8 @@ static int pgpPrtSigParams(pgpTag tag, uint8_t pubkey_algo, uint8_t sigtype,
+ int mpil = pgpMpiLen(p);
+ if (p + mpil > pend)
+ break;
+- if (sigtype == PGPSIGTYPE_BINARY || sigtype == PGPSIGTYPE_TEXT) {
+- if (sigalg->setmpi(sigalg, i, p))
+- break;
+- }
++ if (sigalg->setmpi(sigalg, i, p))
++ break;
+ p += mpil;
+ }
+
+@@ -600,7 +598,7 @@ static int pgpPrtSig(pgpTag tag, const uint8_t *h, size_t hlen,
+ }
+
+ p = ((uint8_t *)v) + sizeof(*v);
+- rc = pgpPrtSigParams(tag, v->pubkey_algo, v->sigtype, p, h, hlen, _digp);
++ rc = pgpPrtSigParams(tag, v->pubkey_algo, p, h, hlen, _digp);
+ } break;
+ case 4:
+ { pgpPktSigV4 v = (pgpPktSigV4)h;
+@@ -658,7 +656,7 @@ static int pgpPrtSig(pgpTag tag, const uint8_t *h, size_t hlen,
+ if (p > (h + hlen))
+ return 1;
+
+- rc = pgpPrtSigParams(tag, v->pubkey_algo, v->sigtype, p, h, hlen, _digp);
++ rc = pgpPrtSigParams(tag, v->pubkey_algo, p, h, hlen, _digp);
+ } break;
+ default:
+ rpmlog(RPMLOG_WARNING, _("Unsupported version of key: V%d\n"), version);
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3521-02.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3521-02.patch
new file mode 100644
index 0000000000..c5f88a8c72
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3521-02.patch
@@ -0,0 +1,55 @@
+From 9f03f42e2614a68f589f9db8fe76287146522c0c Mon Sep 17 00:00:00 2001
+From: Panu Matilainen <pmatilai@redhat.com>
+Date: Thu, 30 Sep 2021 09:56:20 +0300
+Subject: [PATCH] Refactor pgpDigParams construction to helper function
+
+No functional changes, just to reduce code duplication and needed by
+the following commits.
+
+Dependent patch:
+CVE: CVE-2021-3521
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/9f03f42e2614a68f589f9db8fe76287146522c0c]
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+---
+ rpmio/rpmpgp.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
+index 340de5fc9a..aad7c275c9 100644
+--- a/rpmio/rpmpgp.c
++++ b/rpmio/rpmpgp.c
+@@ -1055,6 +1055,13 @@ unsigned int pgpDigParamsAlgo(pgpDigParams digp, unsigned int algotype)
+ return algo;
+ }
+
++static pgpDigParams pgpDigParamsNew(uint8_t tag)
++{
++ pgpDigParams digp = xcalloc(1, sizeof(*digp));
++ digp->tag = tag;
++ return digp;
++}
++
+ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
+ pgpDigParams * ret)
+ {
+@@ -1072,8 +1079,7 @@ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
+ if (pkttype && pkt.tag != pkttype) {
+ break;
+ } else {
+- digp = xcalloc(1, sizeof(*digp));
+- digp->tag = pkt.tag;
++ digp = pgpDigParamsNew(pkt.tag);
+ }
+ }
+
+@@ -1121,8 +1127,7 @@ int pgpPrtParamsSubkeys(const uint8_t *pkts, size_t pktlen,
+ digps = xrealloc(digps, alloced * sizeof(*digps));
+ }
+
+- digps[count] = xcalloc(1, sizeof(**digps));
+- digps[count]->tag = PGPTAG_PUBLIC_SUBKEY;
++ digps[count] = pgpDigParamsNew(PGPTAG_PUBLIC_SUBKEY);
+ /* Copy UID from main key to subkey */
+ digps[count]->userid = xstrdup(mainkey->userid);
+
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3521-03.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3521-03.patch
new file mode 100644
index 0000000000..fd31f11beb
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3521-03.patch
@@ -0,0 +1,34 @@
+From 5ff86764b17f31535cb247543a90dd739076ec38 Mon Sep 17 00:00:00 2001
+From: Demi Marie Obenour <demi@invisiblethingslab.com>
+Date: Thu, 6 May 2021 18:34:45 -0400
+Subject: [PATCH] Do not allow extra packets to follow a signature
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+According to RFC 4880 § 11.4, a detached signature is “simply a
+Signature packet”. Therefore, extra packets following a detached
+signature are not allowed.
+
+Dependent patch:
+CVE: CVE-2021-3521
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/5ff86764b17f31535cb247543a90dd739076ec38]
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+---
+ rpmio/rpmpgp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
+index f1a99e7169..5b346a8253 100644
+--- a/rpmio/rpmpgp.c
++++ b/rpmio/rpmpgp.c
+@@ -1068,6 +1068,8 @@ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
+ break;
+
+ p += (pkt.body - pkt.head) + pkt.blen;
++ if (pkttype == PGPTAG_SIGNATURE)
++ break;
+ }
+
+ rc = (digp && (p == pend)) ? 0 : -1;
diff --git a/meta/recipes-devtools/rpm/files/CVE-2021-3521.patch b/meta/recipes-devtools/rpm/files/CVE-2021-3521.patch
new file mode 100644
index 0000000000..cb9e9842fe
--- /dev/null
+++ b/meta/recipes-devtools/rpm/files/CVE-2021-3521.patch
@@ -0,0 +1,330 @@
+From bd36c5dc9fb6d90c46fbfed8c2d67516fc571ec8 Mon Sep 17 00:00:00 2001
+From: Panu Matilainen <pmatilai@redhat.com>
+Date: Thu, 30 Sep 2021 09:59:30 +0300
+Subject: [PATCH] Validate and require subkey binding signatures on PGP public
+ keys
+
+All subkeys must be followed by a binding signature by the primary key
+as per the OpenPGP RFC, enforce the presence and validity in the parser.
+
+The implementation is as kludgey as they come to work around our
+simple-minded parser structure without touching API, to maximise
+backportability. Store all the raw packets internally as we decode them
+to be able to access previous elements at will, needed to validate ordering
+and access the actual data. Add testcases for manipulated keys whose
+import previously would succeed.
+
+Depends on the two previous commits:
+7b399fcb8f52566e6f3b4327197a85facd08db91 and
+236b802a4aa48711823a191d1b7f753c82a89ec5
+
+CVE: CVE-2021-3521
+Upstream-Status: Backport [https://github.com/rpm-software-management/rpm/commit/bd36c5dc9fb6d90c46fbfed8c2d67516fc571ec8]
+Comment: Hunk refreshed
+Signed-off-by: Riyaz Khan <Riyaz.Khan@kpit.com>
+
+Fixes CVE-2021-3521.
+---
+ rpmio/rpmpgp.c | 98 +++++++++++++++++--
+ tests/Makefile.am | 3 +
+ tests/data/keys/CVE-2021-3521-badbind.asc | 25 +++++
+ .../data/keys/CVE-2021-3521-nosubsig-last.asc | 25 +++++
+ tests/data/keys/CVE-2021-3521-nosubsig.asc | 37 +++++++
+ tests/rpmsigdig.at | 28 ++++++
+ 6 files changed, 209 insertions(+), 7 deletions(-)
+ create mode 100644 tests/data/keys/CVE-2021-3521-badbind.asc
+ create mode 100644 tests/data/keys/CVE-2021-3521-nosubsig-last.asc
+ create mode 100644 tests/data/keys/CVE-2021-3521-nosubsig.asc
+
+diff --git a/rpmio/rpmpgp.c b/rpmio/rpmpgp.c
+index aad7c275c9..d70802ae86 100644
+--- a/rpmio/rpmpgp.c
++++ b/rpmio/rpmpgp.c
+@@ -1004,37 +1004,121 @@ static pgpDigParams pgpDigParamsNew(uint8_t tag)
+ return digp;
+ }
+
++static int hashKey(DIGEST_CTX hash, const struct pgpPkt *pkt, int exptag)
++{
++ int rc = -1;
++ if (pkt->tag == exptag) {
++ uint8_t head[] = {
++ 0x99,
++ (pkt->blen >> 8),
++ (pkt->blen ),
++ };
++
++ rpmDigestUpdate(hash, head, 3);
++ rpmDigestUpdate(hash, pkt->body, pkt->blen);
++ rc = 0;
++ }
++ return rc;
++}
++
++static int pgpVerifySelf(pgpDigParams key, pgpDigParams selfsig,
++ const struct pgpPkt *all, int i)
++{
++ int rc = -1;
++ DIGEST_CTX hash = NULL;
++
++ switch (selfsig->sigtype) {
++ case PGPSIGTYPE_SUBKEY_BINDING:
++ hash = rpmDigestInit(selfsig->hash_algo, 0);
++ if (hash) {
++ rc = hashKey(hash, &all[0], PGPTAG_PUBLIC_KEY);
++ if (!rc)
++ rc = hashKey(hash, &all[i-1], PGPTAG_PUBLIC_SUBKEY);
++ }
++ break;
++ default:
++ /* ignore types we can't handle */
++ rc = 0;
++ break;
++ }
++
++ if (hash && rc == 0)
++ rc = pgpVerifySignature(key, selfsig, hash);
++
++ rpmDigestFinal(hash, NULL, NULL, 0);
++
++ return rc;
++}
++
+ int pgpPrtParams(const uint8_t * pkts, size_t pktlen, unsigned int pkttype,
+ pgpDigParams * ret)
+ {
+ const uint8_t *p = pkts;
+ const uint8_t *pend = pkts + pktlen;
+ pgpDigParams digp = NULL;
+- struct pgpPkt pkt;
++ pgpDigParams selfsig = NULL;
++ int i = 0;
++ int alloced = 16; /* plenty for normal cases */
++ struct pgpPkt *all = xmalloc(alloced * sizeof(*all));
+ int rc = -1; /* assume failure */
++ int expect = 0;
++ int prevtag = 0;
+
+ while (p < pend) {
+- if (decodePkt(p, (pend - p), &pkt))
++ struct pgpPkt *pkt = &all[i];
++ if (decodePkt(p, (pend - p), pkt))
+ break;
+
+ if (digp == NULL) {
+- if (pkttype && pkt.tag != pkttype) {
++ if (pkttype && pkt->tag != pkttype) {
+ break;
+ } else {
+- digp = pgpDigParamsNew(pkt.tag);
++ digp = pgpDigParamsNew(pkt->tag);
+ }
+ }
+
+- if (pgpPrtPkt(&pkt, digp))
++ if (expect) {
++ if (pkt->tag != expect)
++ break;
++ selfsig = pgpDigParamsNew(pkt->tag);
++ }
++
++ if (pgpPrtPkt(pkt, selfsig ? selfsig : digp))
+ break;
+
+- p += (pkt.body - pkt.head) + pkt.blen;
++ if (selfsig) {
++ /* subkeys must be followed by binding signature */
++ if (prevtag == PGPTAG_PUBLIC_SUBKEY) {
++ if (selfsig->sigtype != PGPSIGTYPE_SUBKEY_BINDING)
++ break;
++ }
++
++ int xx = pgpVerifySelf(digp, selfsig, all, i);
++
++ selfsig = pgpDigParamsFree(selfsig);
++ if (xx)
++ break;
++ expect = 0;
++ }
++
++ if (pkt->tag == PGPTAG_PUBLIC_SUBKEY)
++ expect = PGPTAG_SIGNATURE;
++ prevtag = pkt->tag;
++
++ i++;
++ p += (pkt->body - pkt->head) + pkt->blen;
+ if (pkttype == PGPTAG_SIGNATURE)
+ break;
++
++ if (alloced <= i) {
++ alloced *= 2;
++ all = xrealloc(all, alloced * sizeof(*all));
++ }
+ }
+
+- rc = (digp && (p == pend)) ? 0 : -1;
++ rc = (digp && (p == pend) && expect == 0) ? 0 : -1;
+
++ free(all);
+ if (ret && rc == 0) {
+ *ret = digp;
+ } else {
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index b4a2e2e1ce..bc535d2833 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -87,6 +87,9 @@ EXTRA_DIST += data/SPECS/hello-config-buildid.spec
+ EXTRA_DIST += data/SPECS/hello-cd.spec
+ EXTRA_DIST += data/keys/rpm.org-rsa-2048-test.pub
+ EXTRA_DIST += data/keys/rpm.org-rsa-2048-test.secret
++EXTRA_DIST += data/keys/CVE-2021-3521-badbind.asc
++EXTRA_DIST += data/keys/CVE-2021-3521-nosubsig.asc
++EXTRA_DIST += data/keys/CVE-2021-3521-nosubsig-last.asc
+ EXTRA_DIST += data/macros.testfile
+
+ # testsuite voodoo
+diff --git a/tests/data/keys/CVE-2021-3521-badbind.asc b/tests/data/keys/CVE-2021-3521-badbind.asc
+new file mode 100644
+index 0000000000..aea00f9d7a
+--- /dev/null
++++ b/tests/data/keys/CVE-2021-3521-badbind.asc
+@@ -0,0 +1,25 @@
++-----BEGIN PGP PUBLIC KEY BLOCK-----
++Version: rpm-4.17.90 (NSS-3)
++
++mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
++HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
++91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
++eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
++7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
++1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
++c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
++CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
++Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
++BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
++XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
++fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
+++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
++BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
++zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
++iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
++Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
++KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
++L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAE=
++=WCfs
++-----END PGP PUBLIC KEY BLOCK-----
++
+diff --git a/tests/data/keys/CVE-2021-3521-nosubsig-last.asc b/tests/data/keys/CVE-2021-3521-nosubsig-last.asc
+new file mode 100644
+index 0000000000..aea00f9d7a
+--- /dev/null
++++ b/tests/data/keys/CVE-2021-3521-nosubsig-last.asc
+@@ -0,0 +1,25 @@
++-----BEGIN PGP PUBLIC KEY BLOCK-----
++Version: rpm-4.17.90 (NSS-3)
++
++mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
++HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
++91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
++eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
++7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
++1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
++c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
++CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
++Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
++BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
++XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
++fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
+++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
++BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
++zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
++iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
++Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
++KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
++L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAE=
++=WCfs
++-----END PGP PUBLIC KEY BLOCK-----
++
+diff --git a/tests/data/keys/CVE-2021-3521-nosubsig.asc b/tests/data/keys/CVE-2021-3521-nosubsig.asc
+new file mode 100644
+index 0000000000..3a2e7417f8
+--- /dev/null
++++ b/tests/data/keys/CVE-2021-3521-nosubsig.asc
+@@ -0,0 +1,37 @@
++-----BEGIN PGP PUBLIC KEY BLOCK-----
++Version: rpm-4.17.90 (NSS-3)
++
++mQENBFjmORgBCAC7TMEk6wnjSs8Dr4yqSScWdU2pjcqrkTxuzdWvowcIUPZI0w/g
++HkRqGd4apjvY2V15kjL10gk3QhFP3pZ/9p7zh8o8NHX7aGdSGDK7NOq1eFaErPRY
++91LW9RiZ0lbOjXEzIL0KHxUiTQEmdXJT43DJMFPyW9fkCWg0OltiX618FUdWWfI8
++eySdLur1utnqBvdEbCUvWK2RX3vQZQdvEBODnNk2pxqTyV0w6VPQ96W++lF/5Aas
++7rUv3HIyIXxIggc8FRrnH+y9XvvHDonhTIlGnYZN4ubm9i4y3gOkrZlGTrEw7elQ
++1QeMyG2QQEbze8YjpTm4iLABCBrRfPRaQpwrABEBAAG0IXJwbS5vcmcgUlNBIHRl
++c3RrZXkgPHJzYUBycG0ub3JnPokBNwQTAQgAIQUCWOY5GAIbAwULCQgHAgYVCAkK
++CwIEFgIDAQIeAQIXgAAKCRBDRFkeGWTF/MxxCACnjqFL+MmPh9W9JQKT2DcLbBzf
++Cqo6wcEBoCOcwgRSk8dSikhARoteoa55JRJhuMyeKhhEAogE9HRmCPFdjezFTwgB
++BDVBpO2dZ023mLXDVCYX3S8pShOgCP6Tn4wqCnYeAdLcGg106N4xcmgtcssJE+Pr
++XzTZksbZsrTVEmL/Ym+R5w5jBfFnGk7Yw7ndwfQsfNXQb5AZynClFxnX546lcyZX
++fEx3/e6ezw57WNOUK6WT+8b+EGovPkbetK/rGxNXuWaP6X4A/QUm8O98nCuHYFQq
+++mvNdsCBqGf7mhaRGtpHk/JgCn5rFvArMDqLVrR9hX0LdCSsH7EGE+bR3r7wuQEN
++BFjmORgBCACk+vDZrIXQuFXEYToZVwb2attzbbJJCqD71vmZTLsW0QxuPKRgbcYY
++zp4K4lVBnHhFrF8MOUOxJ7kQWIJZMZFt+BDcptCYurbD2H4W2xvnWViiC+LzCMzz
++iMJT6165uefL4JHTDPxC2fFiM9yrc72LmylJNkM/vepT128J5Qv0gRUaQbHiQuS6
++Dm/+WRnUfx3i89SV4mnBxb/Ta93GVqoOciWwzWSnwEnWYAvOb95JL4U7c5J5f/+c
++KnQDHsW7sIiIdscsWzvgf6qs2Ra1Zrt7Fdk4+ZS2f/adagLhDO1C24sXf5XfMk5m
++L0OGwZSr9m5s17VXxfspgU5ugc8kBJfzABEBAAG5AQ0EWOY5GAEIAKT68NmshdC4
++VcRhOhlXBvZq23NtskkKoPvW+ZlMuxbRDG48pGBtxhjOngriVUGceEWsXww5Q7En
++uRBYglkxkW34ENym0Ji6tsPYfhbbG+dZWKIL4vMIzPOIwlPrXrm558vgkdMM/ELZ
++8WIz3KtzvYubKUk2Qz+96lPXbwnlC/SBFRpBseJC5LoOb/5ZGdR/HeLz1JXiacHF
++v9Nr3cZWqg5yJbDNZKfASdZgC85v3kkvhTtzknl//5wqdAMexbuwiIh2xyxbO+B/
++qqzZFrVmu3sV2Tj5lLZ/9p1qAuEM7ULbixd/ld8yTmYvQ4bBlKv2bmzXtVfF+ymB
++Tm6BzyQEl/MAEQEAAYkBHwQYAQgACQUCWOY5GAIbDAAKCRBDRFkeGWTF/PANB/9j
++mifmj6z/EPe0PJFhrpISt9PjiUQCt0IPtiL5zKAkWjHePIzyi+0kCTBF6DDLFxos
++3vN4bWnVKT1kBhZAQlPqpJTg+m74JUYeDGCdNx9SK7oRllATqyu+5rncgxjWVPnQ
++zu/HRPlWJwcVFYEVXYL8xzfantwQTqefjmcRmBRdA2XJITK+hGWwAmrqAWx+q5xX
++Pa8wkNMxVzNS2rUKO9SoVuJ/wlUvfoShkJ/VJ5HDp3qzUqncADfdGN35TDzscngQ
++gHvnMwVBfYfSCABV1hNByoZcc/kxkrWMmsd/EnIyLd1Q1baKqc3cEDuC6E6/o4yJ
++E4XX4jtDmdZPreZALsiB
++=rRop
++-----END PGP PUBLIC KEY BLOCK-----
++
+diff --git a/tests/rpmsigdig.at b/tests/rpmsigdig.at
+index 0f8f2b4884..c8b9f139e1 100644
+--- a/tests/rpmsigdig.at
++++ b/tests/rpmsigdig.at
+@@ -240,6 +240,34 @@ gpg(185e6146f00650f8) = 4:185e6146f00650f8-58e63918
+ [])
+ AT_CLEANUP
+
++AT_SETUP([rpmkeys --import invalid keys])
++AT_KEYWORDS([rpmkeys import])
++RPMDB_INIT
++
++AT_CHECK([
++runroot rpmkeys --import /data/keys/CVE-2021-3521-badbind.asc
++],
++[1],
++[],
++[error: /data/keys/CVE-2021-3521-badbind.asc: key 1 import failed.]
++)
++AT_CHECK([
++runroot rpmkeys --import /data/keys/CVE-2021-3521-nosubsig.asc
++],
++[1],
++[],
++[error: /data/keys/CVE-2021-3521-nosubsig.asc: key 1 import failed.]
++)
++
++AT_CHECK([
++runroot rpmkeys --import /data/keys/CVE-2021-3521-nosubsig-last.asc
++],
++[1],
++[],
++[error: /data/keys/CVE-2021-3521-nosubsig-last.asc: key 1 import failed.]
++)
++AT_CLEANUP
++
+ # ------------------------------
+ # Test pre-built package verification
+ AT_SETUP([rpmkeys -K <signed> 1])
+
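
The parser change in CVE-2021-3521.patch above reduces to a sequencing rule: every public subkey packet must be immediately followed by a signature packet, that signature must be a subkey-binding signature, and it must verify against the primary key via pgpVerifySelf(). Below is a minimal, self-contained sketch of just the sequencing part, with illustrative names rather than rpm's internal API; the three CVE-2021-3521-*.asc keys added above each violate this rule, which is what the new rpmkeys --import tests assert.

    #include <stdio.h>

    /* Packet tag values as defined in RFC 4880 (and mirrored in rpmpgp.h). */
    enum { TAG_SIGNATURE = 2, TAG_PUBLIC_KEY = 6, TAG_USER_ID = 13, TAG_PUBLIC_SUBKEY = 14 };

    /* Sequencing only: a public subkey must be followed by a signature packet,
     * and the stream must not end while one is still owed.  The real code also
     * requires PGPSIGTYPE_SUBKEY_BINDING and verifies it with pgpVerifySelf(). */
    static int check_subkey_bindings(const int *tags, int n)
    {
        int expect = 0;
        for (int i = 0; i < n; i++) {
            if (expect && tags[i] != expect)
                return -1;                     /* subkey not followed by a signature */
            expect = (tags[i] == TAG_PUBLIC_SUBKEY) ? TAG_SIGNATURE : 0;
        }
        return expect ? -1 : 0;                /* reject a trailing unbound subkey */
    }

    int main(void)
    {
        int good[] = { TAG_PUBLIC_KEY, TAG_USER_ID, TAG_SIGNATURE, TAG_PUBLIC_SUBKEY, TAG_SIGNATURE };
        int bad[]  = { TAG_PUBLIC_KEY, TAG_USER_ID, TAG_SIGNATURE, TAG_PUBLIC_SUBKEY };
        printf("good key: %d, unbound subkey: %d\n",
               check_subkey_bindings(good, 5), check_subkey_bindings(bad, 4));
        return 0;
    }
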
diff --git a/meta/recipes-devtools/rpm/rpm_4.14.2.1.bb b/meta/recipes-devtools/rpm/rpm_4.14.2.1.bb
index 376021d913..4d605c8501 100644
--- a/meta/recipes-devtools/rpm/rpm_4.14.2.1.bb
+++ b/meta/recipes-devtools/rpm/rpm_4.14.2.1.bb
@@ -47,6 +47,10 @@ SRC_URI = "git://github.com/rpm-software-management/rpm;branch=rpm-4.14.x;protoc
file://0001-rpmio-Fix-lzopen_internal-mode-parsing-when-Tn-is-us.patch \
file://CVE-2021-3421.patch \
file://CVE-2021-20266.patch \
+ file://CVE-2021-3521-01.patch \
+ file://CVE-2021-3521-02.patch \
+ file://CVE-2021-3521-03.patch \
+ file://CVE-2021-3521.patch \
"
PE = "1"
diff --git a/meta/recipes-devtools/rsync/files/0001-Fix-relative-when-copying-an-absolute-path.patch b/meta/recipes-devtools/rsync/files/0001-Fix-relative-when-copying-an-absolute-path.patch
new file mode 100644
index 0000000000..b2e02dba97
--- /dev/null
+++ b/meta/recipes-devtools/rsync/files/0001-Fix-relative-when-copying-an-absolute-path.patch
@@ -0,0 +1,31 @@
+From fabef23bea6e9963c06e218586fda1a823e3c6bf Mon Sep 17 00:00:00 2001
+From: Wayne Davison <wayne@opencoder.net>
+Date: Mon, 8 Aug 2022 21:30:21 -0700
+Subject: [PATCH] Fix --relative when copying an absolute path.
+
+CVE: CVE-2022-29154
+Upstream-Status: Backport [https://github.com/WayneD/rsync/commit/fabef23bea6e9963c06e218586fda1a823e3c6bf]
+Signed-off-by: Matthias Schmitz <matthias.schmitz@port4949.net>
+---
+ exclude.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/exclude.c b/exclude.c
+index 2394023f..ba5ca5a3 100644
+--- a/exclude.c
++++ b/exclude.c
+@@ -434,8 +434,10 @@ void add_implied_include(const char *arg)
+ *p++ = *cp++;
+ break;
+ case '/':
+- if (p[-1] == '/') /* This is safe because of the initial slash. */
++ if (p[-1] == '/') { /* This is safe because of the initial slash. */
++ cp++;
+ break;
++ }
+ if (relative_paths) {
+ filter_rule const *ent;
+ int found = 0;
+--
+2.39.2
+
diff --git a/meta/recipes-devtools/rsync/files/CVE-2022-29154.patch b/meta/recipes-devtools/rsync/files/CVE-2022-29154.patch
new file mode 100644
index 0000000000..61e4e03254
--- /dev/null
+++ b/meta/recipes-devtools/rsync/files/CVE-2022-29154.patch
@@ -0,0 +1,334 @@
+From b7231c7d02cfb65d291af74ff66e7d8c507ee871 Mon Sep 17 00:00:00 2001
+From: Wayne Davison <wayne@opencoder.net>
+Date: Sun, 31 Jul 2022 16:55:34 -0700
+Subject: [PATCH] Some extra file-list safety checks.
+
+CVE-2022-29154 rsync: remote arbitrary file write inside the directories of connecting peers
+
+Upstream-Status: Backport from [https://git.samba.org/?p=rsync.git;a=patch;h=b7231c7d02cfb65d291af74ff66e7d8c507ee871]
+CVE: CVE-2022-29154
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ exclude.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
+ flist.c | 17 ++++++-
+ io.c | 4 ++
+ main.c | 7 ++-
+ receiver.c | 11 +++--
+ 5 files changed, 158 insertions(+), 8 deletions(-)
+
+diff --git a/exclude.c b/exclude.c
+index 7989fb3..e146e96 100644
+--- a/exclude.c
++++ b/exclude.c
+@@ -26,16 +26,21 @@ extern int am_server;
+ extern int am_sender;
+ extern int eol_nulls;
+ extern int io_error;
++extern int xfer_dirs;
++extern int recurse;
+ extern int local_server;
+ extern int prune_empty_dirs;
+ extern int ignore_perishable;
++extern int relative_paths;
+ extern int delete_mode;
+ extern int delete_excluded;
+ extern int cvs_exclude;
+ extern int sanitize_paths;
+ extern int protocol_version;
++extern int list_only;
+ extern int module_id;
+
++extern char *filesfrom_host;
+ extern char curr_dir[MAXPATHLEN];
+ extern unsigned int curr_dir_len;
+ extern unsigned int module_dirlen;
+@@ -43,8 +48,10 @@ extern unsigned int module_dirlen;
+ filter_rule_list filter_list = { .debug_type = "" };
+ filter_rule_list cvs_filter_list = { .debug_type = " [global CVS]" };
+ filter_rule_list daemon_filter_list = { .debug_type = " [daemon]" };
++filter_rule_list implied_filter_list = { .debug_type = " [implied]" };
+
+ int saw_xattr_filter = 0;
++int trust_sender_filter = 0;
+
+ /* Need room enough for ":MODS " prefix plus some room to grow. */
+ #define MAX_RULE_PREFIX (16)
+@@ -293,6 +300,123 @@ static void add_rule(filter_rule_list *listp, const char *pat, unsigned int pat_
+ }
+ }
+
++/* Each arg the client sends to the remote sender turns into an implied include
++ * that the receiver uses to validate the file list from the sender. */
++void add_implied_include(const char *arg)
++{
++ filter_rule *rule;
++ int arg_len, saw_wild = 0, backslash_cnt = 0;
++ int slash_cnt = 1; /* We know we're adding a leading slash. */
++ const char *cp;
++ char *p;
++ if (relative_paths) {
++ cp = strstr(arg, "/./");
++ if (cp)
++ arg = cp+3;
++ } else {
++ if ((cp = strrchr(arg, '/')) != NULL)
++ arg = cp + 1;
++ }
++ arg_len = strlen(arg);
++ if (arg_len) {
++ if (strpbrk(arg, "*[?")) {
++ /* We need to add room to escape backslashes if wildcard chars are present. */
++ cp = arg;
++ while ((cp = strchr(cp, '\\')) != NULL) {
++ arg_len++;
++ cp++;
++ }
++ saw_wild = 1;
++ }
++ arg_len++; /* Leave room for the prefixed slash */
++ rule = new0(filter_rule);
++ if (!implied_filter_list.head)
++ implied_filter_list.head = implied_filter_list.tail = rule;
++ else {
++ rule->next = implied_filter_list.head;
++ implied_filter_list.head = rule;
++ }
++ rule->rflags = FILTRULE_INCLUDE + (saw_wild ? FILTRULE_WILD : 0);
++ p = rule->pattern = new_array(char, arg_len + 1);
++ *p++ = '/';
++ cp = arg;
++ while (*cp) {
++ switch (*cp) {
++ case '\\':
++ backslash_cnt++;
++ if (saw_wild)
++ *p++ = '\\';
++ *p++ = *cp++;
++ break;
++ case '/':
++ if (p[-1] == '/') /* This is safe because of the initial slash. */
++ break;
++ if (relative_paths) {
++ filter_rule const *ent;
++ int found = 0;
++ *p = '\0';
++ for (ent = implied_filter_list.head; ent; ent = ent->next) {
++ if (ent != rule && strcmp(ent->pattern, rule->pattern) == 0)
++ found = 1;
++ }
++ if (!found) {
++ filter_rule *R_rule = new0(filter_rule);
++ R_rule->rflags = FILTRULE_INCLUDE + (saw_wild ? FILTRULE_WILD : 0);
++ R_rule->pattern = strdup(rule->pattern);
++ R_rule->u.slash_cnt = slash_cnt;
++ R_rule->next = implied_filter_list.head;
++ implied_filter_list.head = R_rule;
++ }
++ }
++ slash_cnt++;
++ *p++ = *cp++;
++ break;
++ default:
++ *p++ = *cp++;
++ break;
++ }
++ }
++ *p = '\0';
++ rule->u.slash_cnt = slash_cnt;
++ arg = (const char *)rule->pattern;
++ }
++
++ if (recurse || xfer_dirs) {
++ /* Now create a rule with an added "/" & "**" or "*" at the end */
++ rule = new0(filter_rule);
++ if (recurse)
++ rule->rflags = FILTRULE_INCLUDE | FILTRULE_WILD | FILTRULE_WILD2;
++ else
++ rule->rflags = FILTRULE_INCLUDE | FILTRULE_WILD;
++ /* A +4 in the len leaves enough room for / * * \0 or / * \0 \0 */
++ if (!saw_wild && backslash_cnt) {
++ /* We are appending a wildcard, so now the backslashes need to be escaped. */
++ p = rule->pattern = new_array(char, arg_len + backslash_cnt + 3 + 1);
++ cp = arg;
++ while (*cp) {
++ if (*cp == '\\')
++ *p++ = '\\';
++ *p++ = *cp++;
++ }
++ } else {
++ p = rule->pattern = new_array(char, arg_len + 3 + 1);
++ if (arg_len) {
++ memcpy(p, arg, arg_len);
++ p += arg_len;
++ }
++ }
++ if (p[-1] != '/')
++ *p++ = '/';
++ *p++ = '*';
++ if (recurse)
++ *p++ = '*';
++ *p = '\0';
++ rule->u.slash_cnt = slash_cnt + 1;
++ rule->next = implied_filter_list.head;
++ implied_filter_list.head = rule;
++ }
++}
++
+ /* This frees any non-inherited items, leaving just inherited items on the list. */
+ static void pop_filter_list(filter_rule_list *listp)
+ {
+@@ -721,7 +845,7 @@ static void report_filter_result(enum logcode code, char const *name,
+ : name_flags & NAME_IS_DIR ? "directory"
+ : "file";
+ rprintf(code, "[%s] %sing %s %s because of pattern %s%s%s\n",
+- w, actions[*w!='s'][!(ent->rflags & FILTRULE_INCLUDE)],
++ w, actions[*w=='g'][!(ent->rflags & FILTRULE_INCLUDE)],
+ t, name, ent->pattern,
+ ent->rflags & FILTRULE_DIRECTORY ? "/" : "", type);
+ }
+@@ -894,6 +1018,7 @@ static filter_rule *parse_rule_tok(const char **rulestr_ptr,
+ }
+ switch (ch) {
+ case ':':
++ trust_sender_filter = 1;
+ rule->rflags |= FILTRULE_PERDIR_MERGE
+ | FILTRULE_FINISH_SETUP;
+ /* FALL THROUGH */
+diff --git a/flist.c b/flist.c
+index 499440c..630d685 100644
+--- a/flist.c
++++ b/flist.c
+@@ -70,6 +70,7 @@ extern int need_unsorted_flist;
+ extern int sender_symlink_iconv;
+ extern int output_needs_newline;
+ extern int sender_keeps_checksum;
++extern int trust_sender_filter;
+ extern int unsort_ndx;
+ extern uid_t our_uid;
+ extern struct stats stats;
+@@ -80,8 +81,7 @@ extern char curr_dir[MAXPATHLEN];
+
+ extern struct chmod_mode_struct *chmod_modes;
+
+-extern filter_rule_list filter_list;
+-extern filter_rule_list daemon_filter_list;
++extern filter_rule_list filter_list, implied_filter_list, daemon_filter_list;
+
+ #ifdef ICONV_OPTION
+ extern int filesfrom_convert;
+@@ -904,6 +904,19 @@ static struct file_struct *recv_file_entry(int f, struct file_list *flist, int x
+ exit_cleanup(RERR_UNSUPPORTED);
+ }
+
++ if (*thisname != '.' || thisname[1] != '\0') {
++ int filt_flags = S_ISDIR(mode) ? NAME_IS_DIR : NAME_IS_FILE;
++ if (!trust_sender_filter /* a per-dir filter rule means we must trust the sender's filtering */
++ && filter_list.head && check_filter(&filter_list, FINFO, thisname, filt_flags) < 0) {
++ rprintf(FERROR, "ERROR: rejecting excluded file-list name: %s\n", thisname);
++ exit_cleanup(RERR_PROTOCOL);
++ }
++ if (implied_filter_list.head && check_filter(&implied_filter_list, FINFO, thisname, filt_flags) <= 0) {
++ rprintf(FERROR, "ERROR: rejecting unrequested file-list name: %s\n", thisname);
++ exit_cleanup(RERR_PROTOCOL);
++ }
++ }
++
+ if (inc_recurse && S_ISDIR(mode)) {
+ if (one_file_system) {
+ /* Room to save the dir's device for -x */
+diff --git a/io.c b/io.c
+index c04dbd5..698a7da 100644
+--- a/io.c
++++ b/io.c
+@@ -415,6 +415,7 @@ static void forward_filesfrom_data(void)
+ while (s != eob) {
+ if (*s++ == '\0') {
+ ff_xb.len = s - sob - 1;
++ add_implied_include(sob);
+ if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0)
+ exit_cleanup(RERR_PROTOCOL); /* impossible? */
+ write_buf(iobuf.out_fd, s-1, 1); /* Send the '\0'. */
+@@ -446,9 +447,12 @@ static void forward_filesfrom_data(void)
+ char *f = ff_xb.buf + ff_xb.pos;
+ char *t = ff_xb.buf;
+ char *eob = f + len;
++ char *cur = t;
+ /* Eliminate any multi-'\0' runs. */
+ while (f != eob) {
+ if (!(*t++ = *f++)) {
++ add_implied_include(cur);
++ cur = t;
+ while (f != eob && *f == '\0')
+ f++;
+ }
+diff --git a/main.c b/main.c
+index ee9630f..6ec56e7 100644
+--- a/main.c
++++ b/main.c
+@@ -78,6 +78,7 @@ extern BOOL flist_receiving_enabled;
+ extern BOOL shutting_down;
+ extern int backup_dir_len;
+ extern int basis_dir_cnt;
++extern int trust_sender_filter;
+ extern struct stats stats;
+ extern char *stdout_format;
+ extern char *logfile_format;
+@@ -93,7 +94,7 @@ extern char curr_dir[MAXPATHLEN];
+ extern char backup_dir_buf[MAXPATHLEN];
+ extern char *basis_dir[MAX_BASIS_DIRS+1];
+ extern struct file_list *first_flist;
+-extern filter_rule_list daemon_filter_list;
++extern filter_rule_list daemon_filter_list, implied_filter_list;
+
+ uid_t our_uid;
+ gid_t our_gid;
+@@ -534,6 +535,7 @@ static pid_t do_cmd(char *cmd, char *machine, char *user, char **remote_argv, in
+ #ifdef ICONV_CONST
+ setup_iconv();
+ #endif
++ trust_sender_filter = 1;
+ } else if (local_server) {
+ /* If the user didn't request --[no-]whole-file, force
+ * it on, but only if we're not batch processing. */
+@@ -1358,6 +1360,8 @@ static int start_client(int argc, char *argv[])
+ char *dummy_host;
+ int dummy_port = rsync_port;
+ int i;
++ if (filesfrom_fd < 0)
++ add_implied_include(remote_argv[0]);
+ /* For remote source, any extra source args must have either
+ * the same hostname or an empty hostname. */
+ for (i = 1; i < remote_argc; i++) {
+@@ -1381,6 +1385,7 @@ static int start_client(int argc, char *argv[])
+ if (!rsync_port && !*arg) /* Turn an empty arg into a dot dir. */
+ arg = ".";
+ remote_argv[i] = arg;
++ add_implied_include(arg);
+ }
+ }
+
+diff --git a/receiver.c b/receiver.c
+index d6a48f1..c0aa893 100644
+--- a/receiver.c
++++ b/receiver.c
+@@ -577,10 +577,13 @@ int recv_files(int f_in, int f_out, char *local_name)
+ if (DEBUG_GTE(RECV, 1))
+ rprintf(FINFO, "recv_files(%s)\n", fname);
+
+- if (daemon_filter_list.head && (*fname != '.' || fname[1] != '\0')
+- && check_filter(&daemon_filter_list, FLOG, fname, 0) < 0) {
+- rprintf(FERROR, "attempt to hack rsync failed.\n");
+- exit_cleanup(RERR_PROTOCOL);
++ if (daemon_filter_list.head && (*fname != '.' || fname[1] != '\0')) {
++ int filt_flags = S_ISDIR(file->mode) ? NAME_IS_DIR : NAME_IS_FILE;
++ if (check_filter(&daemon_filter_list, FLOG, fname, filt_flags) < 0) {
++ rprintf(FERROR, "ERROR: rejecting file transfer request for daemon excluded file: %s\n",
++ fname);
++ exit_cleanup(RERR_PROTOCOL);
++ }
+ }
+
+ #ifdef SUPPORT_XATTRS
+--
+2.30.2
+
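
The heart of the rsync patch above is that every path the client asks for becomes an "implied include" rule, and the receiver then rejects any name in the sender's file list that no rule covers. A simplified, self-contained sketch of that idea follows; the plain prefix match stands in for rsync's real wildcard-aware filter rules, and all names here are illustrative.

    #include <stdio.h>
    #include <string.h>

    /* Accept a sender-supplied name only if it equals, or sits underneath,
     * one of the paths the user actually requested. */
    static int name_was_requested(const char *name, const char **requested, int n)
    {
        for (int i = 0; i < n; i++) {
            size_t len = strlen(requested[i]);
            if (strncmp(name, requested[i], len) == 0 &&
                (name[len] == '\0' || name[len] == '/'))
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        const char *requested[] = { "docs", "src/main.c" };
        const char *from_sender[] = { "docs/readme.txt", "src/main.c", "../../etc/passwd" };
        for (int i = 0; i < 3; i++)
            printf("%-20s %s\n", from_sender[i],
                   name_was_requested(from_sender[i], requested, 2) ? "accept" : "reject");
        return 0;
    }
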
diff --git a/meta/recipes-devtools/rsync/rsync_3.1.3.bb b/meta/recipes-devtools/rsync/rsync_3.1.3.bb
index c743e3f75b..c744503227 100644
--- a/meta/recipes-devtools/rsync/rsync_3.1.3.bb
+++ b/meta/recipes-devtools/rsync/rsync_3.1.3.bb
@@ -16,6 +16,8 @@ SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \
file://CVE-2016-9841.patch \
file://CVE-2016-9842.patch \
file://CVE-2016-9843.patch \
+ file://CVE-2022-29154.patch \
+ file://0001-Fix-relative-when-copying-an-absolute-path.patch \
"
SRC_URI[md5sum] = "1581a588fde9d89f6bc6201e8129afaf"
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2021-33621.patch b/meta/recipes-devtools/ruby/ruby/CVE-2021-33621.patch
new file mode 100644
index 0000000000..cc2f9853db
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2021-33621.patch
@@ -0,0 +1,139 @@
+From 64c5045c0a6b84fdb938a8465a0890e5f7162708 Mon Sep 17 00:00:00 2001
+From: Yusuke Endoh <mame@ruby-lang.org>
+Date: Tue, 22 Nov 2022 10:49:27 +0900
+Subject: [PATCH] Prevent CRLF injection
+
+Throw a RuntimeError if the HTTP response header contains CR or LF to
+prevent HTTP response splitting.
+
+https://hackerone.com/reports/1204695
+
+Upstream-Status: Backport [https://github.com/ruby/cgi/commit/64c5045c0a6b84fdb938a8465a0890e5f7162708]
+CVE: CVE-2021-33621
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/cgi/core.rb | 45 +++++++++++++++++++++++--------------
+ test/cgi/test_cgi_header.rb | 8 +++++++
+ 2 files changed, 36 insertions(+), 17 deletions(-)
+
+diff --git a/lib/cgi/core.rb b/lib/cgi/core.rb
+index bec76e0..62e6068 100644
+--- a/lib/cgi/core.rb
++++ b/lib/cgi/core.rb
+@@ -188,17 +188,28 @@ class CGI
+ # Using #header with the HTML5 tag maker will create a <header> element.
+ alias :header :http_header
+
++ def _no_crlf_check(str)
++ if str
++ str = str.to_s
++ raise "A HTTP status or header field must not include CR and LF" if str =~ /[\r\n]/
++ str
++ else
++ nil
++ end
++ end
++ private :_no_crlf_check
++
+ def _header_for_string(content_type) #:nodoc:
+ buf = ''.dup
+ if nph?()
+- buf << "#{$CGI_ENV['SERVER_PROTOCOL'] || 'HTTP/1.0'} 200 OK#{EOL}"
++ buf << "#{_no_crlf_check($CGI_ENV['SERVER_PROTOCOL']) || 'HTTP/1.0'} 200 OK#{EOL}"
+ buf << "Date: #{CGI.rfc1123_date(Time.now)}#{EOL}"
+- buf << "Server: #{$CGI_ENV['SERVER_SOFTWARE']}#{EOL}"
++ buf << "Server: #{_no_crlf_check($CGI_ENV['SERVER_SOFTWARE'])}#{EOL}"
+ buf << "Connection: close#{EOL}"
+ end
+- buf << "Content-Type: #{content_type}#{EOL}"
++ buf << "Content-Type: #{_no_crlf_check(content_type)}#{EOL}"
+ if @output_cookies
+- @output_cookies.each {|cookie| buf << "Set-Cookie: #{cookie}#{EOL}" }
++ @output_cookies.each {|cookie| buf << "Set-Cookie: #{_no_crlf_check(cookie)}#{EOL}" }
+ end
+ return buf
+ end # _header_for_string
+@@ -213,9 +224,9 @@ class CGI
+ ## NPH
+ options.delete('nph') if defined?(MOD_RUBY)
+ if options.delete('nph') || nph?()
+- protocol = $CGI_ENV['SERVER_PROTOCOL'] || 'HTTP/1.0'
++ protocol = _no_crlf_check($CGI_ENV['SERVER_PROTOCOL']) || 'HTTP/1.0'
+ status = options.delete('status')
+- status = HTTP_STATUS[status] || status || '200 OK'
++ status = HTTP_STATUS[status] || _no_crlf_check(status) || '200 OK'
+ buf << "#{protocol} #{status}#{EOL}"
+ buf << "Date: #{CGI.rfc1123_date(Time.now)}#{EOL}"
+ options['server'] ||= $CGI_ENV['SERVER_SOFTWARE'] || ''
+@@ -223,38 +234,38 @@ class CGI
+ end
+ ## common headers
+ status = options.delete('status')
+- buf << "Status: #{HTTP_STATUS[status] || status}#{EOL}" if status
++ buf << "Status: #{HTTP_STATUS[status] || _no_crlf_check(status)}#{EOL}" if status
+ server = options.delete('server')
+- buf << "Server: #{server}#{EOL}" if server
++ buf << "Server: #{_no_crlf_check(server)}#{EOL}" if server
+ connection = options.delete('connection')
+- buf << "Connection: #{connection}#{EOL}" if connection
++ buf << "Connection: #{_no_crlf_check(connection)}#{EOL}" if connection
+ type = options.delete('type')
+- buf << "Content-Type: #{type}#{EOL}" #if type
++ buf << "Content-Type: #{_no_crlf_check(type)}#{EOL}" #if type
+ length = options.delete('length')
+- buf << "Content-Length: #{length}#{EOL}" if length
++ buf << "Content-Length: #{_no_crlf_check(length)}#{EOL}" if length
+ language = options.delete('language')
+- buf << "Content-Language: #{language}#{EOL}" if language
++ buf << "Content-Language: #{_no_crlf_check(language)}#{EOL}" if language
+ expires = options.delete('expires')
+ buf << "Expires: #{CGI.rfc1123_date(expires)}#{EOL}" if expires
+ ## cookie
+ if cookie = options.delete('cookie')
+ case cookie
+ when String, Cookie
+- buf << "Set-Cookie: #{cookie}#{EOL}"
++ buf << "Set-Cookie: #{_no_crlf_check(cookie)}#{EOL}"
+ when Array
+ arr = cookie
+- arr.each {|c| buf << "Set-Cookie: #{c}#{EOL}" }
++ arr.each {|c| buf << "Set-Cookie: #{_no_crlf_check(c)}#{EOL}" }
+ when Hash
+ hash = cookie
+- hash.each_value {|c| buf << "Set-Cookie: #{c}#{EOL}" }
++ hash.each_value {|c| buf << "Set-Cookie: #{_no_crlf_check(c)}#{EOL}" }
+ end
+ end
+ if @output_cookies
+- @output_cookies.each {|c| buf << "Set-Cookie: #{c}#{EOL}" }
++ @output_cookies.each {|c| buf << "Set-Cookie: #{_no_crlf_check(c)}#{EOL}" }
+ end
+ ## other headers
+ options.each do |key, value|
+- buf << "#{key}: #{value}#{EOL}"
++ buf << "#{_no_crlf_check(key)}: #{_no_crlf_check(value)}#{EOL}"
+ end
+ return buf
+ end # _header_for_hash
+diff --git a/test/cgi/test_cgi_header.rb b/test/cgi/test_cgi_header.rb
+index bab2d03..ec2f4de 100644
+--- a/test/cgi/test_cgi_header.rb
++++ b/test/cgi/test_cgi_header.rb
+@@ -176,6 +176,14 @@ class CGIHeaderTest < Test::Unit::TestCase
+ end
+
+
++ def test_cgi_http_header_crlf_injection
++ cgi = CGI.new
++ assert_raise(RuntimeError) { cgi.http_header("text/xhtml\r\nBOO") }
++ assert_raise(RuntimeError) { cgi.http_header("type" => "text/xhtml\r\nBOO") }
++ assert_raise(RuntimeError) { cgi.http_header("status" => "200 OK\r\nBOO") }
++ assert_raise(RuntimeError) { cgi.http_header("location" => "text/xhtml\r\nBOO") }
++ end
++
+
+ instance_methods.each do |method|
+ private method if method =~ /^test_(.*)/ && $1 != ENV['TEST']
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch b/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch
new file mode 100644
index 0000000000..c25a147d36
--- /dev/null
+++ b/meta/recipes-devtools/ruby/ruby/CVE-2023-28756.patch
@@ -0,0 +1,61 @@
+From 957bb7cb81995f26c671afce0ee50a5c660e540e Mon Sep 17 00:00:00 2001
+From: Hiroshi SHIBATA <hsbt@ruby-lang.org>
+Date: Wed, 29 Mar 2023 13:28:25 +0900
+Subject: [PATCH] CVE-2023-28756
+
+CVE: CVE-2023-28756
+Upstream-Status: Backport [https://github.com/ruby/ruby/commit/957bb7cb81995f26c671afce0ee50a5c660e540e]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/time.rb | 6 +++---
+ test/test_time.rb | 9 +++++++++
+ 2 files changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/lib/time.rb b/lib/time.rb
+index f27bacd..4a86e8e 100644
+--- a/lib/time.rb
++++ b/lib/time.rb
+@@ -501,8 +501,8 @@ class Time
+ (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+
+ (\d{2,})\s+
+ (\d{2})\s*
+- :\s*(\d{2})\s*
+- (?::\s*(\d{2}))?\s+
++ :\s*(\d{2})
++ (?:\s*:\s*(\d\d))?\s+
+ ([+-]\d{4}|
+ UT|GMT|EST|EDT|CST|CDT|MST|MDT|PST|PDT|[A-IK-Z])/ix =~ date
+ # Since RFC 2822 permit comments, the regexp has no right anchor.
+@@ -717,7 +717,7 @@ class Time
+ #
+ # If self is a UTC time, Z is used as TZD. [+-]hh:mm is used otherwise.
+ #
+- # +fractional_digits+ specifies a number of digits to use for fractional
++ # +fraction_digits+ specifies a number of digits to use for fractional
+ # seconds. Its default value is 0.
+ #
+ # require 'time'
+diff --git a/test/test_time.rb b/test/test_time.rb
+index ca20788..4f11048 100644
+--- a/test/test_time.rb
++++ b/test/test_time.rb
+@@ -62,6 +62,15 @@ class TestTimeExtension < Test::Unit::TestCase # :nodoc:
+ assert_equal(true, t.utc?)
+ end
+
++ def test_rfc2822_nonlinear
++ pre = ->(n) {"0 Feb 00 00 :00" + " " * n}
++ assert_linear_performance([100, 500, 5000, 50_000], pre: pre) do |s|
++ assert_raise(ArgumentError) do
++ Time.rfc2822(s)
++ end
++ end
++ end
++
+ def test_encode_rfc2822
+ t = Time.utc(1)
+ assert_equal("Mon, 01 Jan 0001 00:00:00 -0000", t.rfc2822)
+--
+2.25.1
+
diff --git a/meta/recipes-devtools/ruby/ruby_2.7.5.bb b/meta/recipes-devtools/ruby/ruby_2.7.6.bb
index 44a2527ee7..7e6373bd24 100644
--- a/meta/recipes-devtools/ruby/ruby_2.7.5.bb
+++ b/meta/recipes-devtools/ruby/ruby_2.7.6.bb
@@ -7,10 +7,16 @@ SRC_URI += " \
file://run-ptest \
file://0001-Modify-shebang-of-libexec-y2racc-and-libexec-racc2y.patch \
file://0001-template-Makefile.in-do-not-write-host-cross-cc-item.patch \
+ file://CVE-2023-28756.patch \
+ file://CVE-2021-33621.patch \
"
-SRC_URI[md5sum] = "ede247b56fb862f1f67f9471189b04d4"
-SRC_URI[sha256sum] = "2755b900a21235b443bb16dadd9032f784d4a88f143d852bc5d154f22b8781f1"
+SRC_URI[md5sum] = "f972fb0cce662966bec10d5c5f32d042"
+SRC_URI[sha256sum] = "e7203b0cc09442ed2c08936d483f8ac140ec1c72e37bb5c401646b7866cb5d10"
+
+# CVE-2021-28966 is Windows-specific and does not affect Linux
+# https://security-tracker.debian.org/tracker/CVE-2021-28966
+CVE_CHECK_WHITELIST += "CVE-2021-28966"
PACKAGECONFIG ??= ""
PACKAGECONFIG += "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)}"
diff --git a/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service b/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
index 7f72f3388a..b6b81d5c1a 100644
--- a/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
+++ b/meta/recipes-devtools/run-postinsts/run-postinsts/run-postinsts.service
@@ -1,7 +1,7 @@
[Unit]
Description=Run pending postinsts
DefaultDependencies=no
-After=systemd-remount-fs.service systemd-tmpfiles-setup.service tmp.mount
+After=systemd-remount-fs.service systemd-tmpfiles-setup.service tmp.mount ldconfig.service
Before=sysinit.target
[Service]
diff --git a/meta/recipes-devtools/subversion/subversion/CVE-2021-28544.patch b/meta/recipes-devtools/subversion/subversion/CVE-2021-28544.patch
new file mode 100644
index 0000000000..030ead6c66
--- /dev/null
+++ b/meta/recipes-devtools/subversion/subversion/CVE-2021-28544.patch
@@ -0,0 +1,146 @@
+From 61382fd8ea66000bd9ee8e203a6eab443220ee40 Mon Sep 17 00:00:00 2001
+From: Nathan Hartman <hartmannathan@apache.org>
+Date: Sun, 27 Mar 2022 05:59:18 +0000
+Subject: [PATCH] On the 1.14.x-r1899227 branch: Merge r1899227 from trunk
+ w/testlist variation
+
+git-svn-id: https://svn.apache.org/repos/asf/subversion/branches/1.14.x-r1899227@1899229 13f79535-47bb-0310-9956-ffa450edef68
+
+CVE: CVE-2021-28544 [https://github.com/apache/subversion/commit/61382fd8ea66000bd9ee8e203a6eab443220ee40]
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ subversion/libsvn_repos/log.c | 26 +++++-------
+ subversion/tests/cmdline/authz_tests.py | 55 +++++++++++++++++++++++++
+ 2 files changed, 65 insertions(+), 16 deletions(-)
+
+diff --git a/subversion/libsvn_repos/log.c b/subversion/libsvn_repos/log.c
+index d9a1fb1085e16..41ca8aed27174 100644
+--- a/subversion/libsvn_repos/log.c
++++ b/subversion/libsvn_repos/log.c
+@@ -337,42 +337,36 @@ detect_changed(svn_repos_revision_access_level_t *access_level,
+ if ( (change->change_kind == svn_fs_path_change_add)
+ || (change->change_kind == svn_fs_path_change_replace))
+ {
+- const char *copyfrom_path = change->copyfrom_path;
+- svn_revnum_t copyfrom_rev = change->copyfrom_rev;
+-
+ /* the following is a potentially expensive operation since on FSFS
+ we will follow the DAG from ROOT to PATH and that requires
+ actually reading the directories along the way. */
+ if (!change->copyfrom_known)
+ {
+- SVN_ERR(svn_fs_copied_from(&copyfrom_rev, &copyfrom_path,
++ SVN_ERR(svn_fs_copied_from(&change->copyfrom_rev, &change->copyfrom_path,
+ root, path, iterpool));
+ change->copyfrom_known = TRUE;
+ }
+
+- if (copyfrom_path && SVN_IS_VALID_REVNUM(copyfrom_rev))
++ if (change->copyfrom_path && SVN_IS_VALID_REVNUM(change->copyfrom_rev))
+ {
+- svn_boolean_t readable = TRUE;
+-
+ if (callbacks->authz_read_func)
+ {
+ svn_fs_root_t *copyfrom_root;
++ svn_boolean_t readable;
+
+ SVN_ERR(svn_fs_revision_root(&copyfrom_root, fs,
+- copyfrom_rev, iterpool));
++ change->copyfrom_rev, iterpool));
+ SVN_ERR(callbacks->authz_read_func(&readable,
+ copyfrom_root,
+- copyfrom_path,
++ change->copyfrom_path,
+ callbacks->authz_read_baton,
+ iterpool));
+ if (! readable)
+- found_unreadable = TRUE;
+- }
+-
+- if (readable)
+- {
+- change->copyfrom_path = copyfrom_path;
+- change->copyfrom_rev = copyfrom_rev;
++ {
++ found_unreadable = TRUE;
++ change->copyfrom_path = NULL;
++ change->copyfrom_rev = SVN_INVALID_REVNUM;
++ }
+ }
+ }
+ }
+diff --git a/subversion/tests/cmdline/authz_tests.py b/subversion/tests/cmdline/authz_tests.py
+index 760cb3663d02f..92e8a5e1935c9 100755
+--- a/subversion/tests/cmdline/authz_tests.py
++++ b/subversion/tests/cmdline/authz_tests.py
+@@ -1731,6 +1731,60 @@ def empty_group(sbox):
+ '--username', svntest.main.wc_author,
+ sbox.repo_url)
+
++@Skip(svntest.main.is_ra_type_file)
++def log_inaccessible_copyfrom(sbox):
++ "log doesn't leak inaccessible copyfrom paths"
++
++ sbox.build(empty=True)
++ sbox.simple_add_text('secret', 'private')
++ sbox.simple_commit(message='log message for r1')
++ sbox.simple_copy('private', 'public')
++ sbox.simple_commit(message='log message for r2')
++
++ svntest.actions.enable_revprop_changes(sbox.repo_dir)
++ # Remove svn:date and svn:author for predictable output.
++ svntest.actions.run_and_verify_svn(None, [], 'propdel', '--revprop',
++ '-r2', 'svn:date', sbox.repo_url)
++ svntest.actions.run_and_verify_svn(None, [], 'propdel', '--revprop',
++ '-r2', 'svn:author', sbox.repo_url)
++
++ write_restrictive_svnserve_conf(sbox.repo_dir)
++
++ # First test with blanket access.
++ write_authz_file(sbox,
++ {"/" : "* = rw"})
++ expected_output = svntest.verify.ExpectedOutput([
++ "------------------------------------------------------------------------\n",
++ "r2 | (no author) | (no date) | 1 line\n",
++ "Changed paths:\n",
++ " A /public (from /private:1)\n",
++ "\n",
++ "log message for r2\n",
++ "------------------------------------------------------------------------\n",
++ ])
++ svntest.actions.run_and_verify_svn(expected_output, [],
++ 'log', '-r2', '-v',
++ sbox.repo_url)
++
++ # Now test with an inaccessible copy source (/private).
++ write_authz_file(sbox,
++ {"/" : "* = rw"},
++ {"/private" : "* ="})
++ expected_output = svntest.verify.ExpectedOutput([
++ "------------------------------------------------------------------------\n",
++ "r2 | (no author) | (no date) | 1 line\n",
++ "Changed paths:\n",
++ # The copy is shown as a plain add with no copyfrom info.
++ " A /public\n",
++ "\n",
++ # No log message, as the revision is only partially visible.
++ "\n",
++ "------------------------------------------------------------------------\n",
++ ])
++ svntest.actions.run_and_verify_svn(expected_output, [],
++ 'log', '-r2', '-v',
++ sbox.repo_url)
++
+
+ ########################################################################
+ # Run the tests
+@@ -1771,6 +1825,7 @@ def empty_group(sbox):
+ inverted_group_membership,
+ group_member_empty_string,
+ empty_group,
++ log_inaccessible_copyfrom,
+ ]
+ serial_only = True
+
diff --git a/meta/recipes-devtools/subversion/subversion_1.13.0.bb b/meta/recipes-devtools/subversion/subversion_1.13.0.bb
index 34c0dbe5b8..5643191569 100644
--- a/meta/recipes-devtools/subversion/subversion_1.13.0.bb
+++ b/meta/recipes-devtools/subversion/subversion_1.13.0.bb
@@ -13,6 +13,7 @@ SRC_URI = "${APACHE_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://0001-Fix-libtool-name-in-configure.ac.patch \
file://serfmacro.patch \
file://CVE-2020-17525.patch \
+ file://CVE-2021-28544.patch \
"
SRC_URI[md5sum] = "3004b4dae18bf45a0b6ea4ef8820064d"
diff --git a/meta/recipes-devtools/valgrind/valgrind/remove-for-all b/meta/recipes-devtools/valgrind/valgrind/remove-for-all
index 9b8db093df..88a11ca332 100644
--- a/meta/recipes-devtools/valgrind/valgrind/remove-for-all
+++ b/meta/recipes-devtools/valgrind/valgrind/remove-for-all
@@ -1,3 +1,4 @@
drd/tests/bar_bad
drd/tests/bar_bad_xml
gdbserver_tests/hginfo
+memcheck/tests/linux/timerfd-syscall
diff --git a/meta/recipes-extended/bc/bc_1.07.1.bb b/meta/recipes-extended/bc/bc_1.07.1.bb
index ff3e8f4409..8ed10d14c2 100644
--- a/meta/recipes-extended/bc/bc_1.07.1.bb
+++ b/meta/recipes-extended/bc/bc_1.07.1.bb
@@ -32,4 +32,4 @@ do_compile_prepend() {
ALTERNATIVE_${PN} = "bc dc"
ALTERNATIVE_PRIORITY = "100"
-BBCLASSEXTEND = "native"
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-extended/cpio/cpio-2.13/0003-Fix-calculation-of-CRC-in-copy-out-mode.patch b/meta/recipes-extended/cpio/cpio-2.13/0003-Fix-calculation-of-CRC-in-copy-out-mode.patch
new file mode 100644
index 0000000000..2dfd348d7c
--- /dev/null
+++ b/meta/recipes-extended/cpio/cpio-2.13/0003-Fix-calculation-of-CRC-in-copy-out-mode.patch
@@ -0,0 +1,58 @@
+From d257e47a6c6b41ba727b196ac96c05ab91bd9d65 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Fri, 7 Apr 2023 11:23:37 +0300
+Subject: [PATCH 3/4] Fix calculation of CRC in copy-out mode.
+
+* src/copyout.c (read_for_checksum): Fix type of the file_size argument.
+Rewrite the reading loop.
+
+Original patch by Stefano Babic <sbabic@denx.de>
+
+Upstream-Status: Backport [a1b2f7871c3ae5113e0102b870b15ea06a8f0e3d]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ src/copyout.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/src/copyout.c b/src/copyout.c
+index 8b0beb6..f1ff351 100644
+--- a/src/copyout.c
++++ b/src/copyout.c
+@@ -34,27 +34,25 @@
+ compute and return a checksum for them. */
+
+ static uint32_t
+-read_for_checksum (int in_file_des, int file_size, char *file_name)
++read_for_checksum (int in_file_des, off_t file_size, char *file_name)
+ {
+ uint32_t crc;
+- char buf[BUFSIZ];
+- int bytes_left;
+- int bytes_read;
+- int i;
++ unsigned char buf[BUFSIZ];
++ ssize_t bytes_read;
++ ssize_t i;
+
+ crc = 0;
+
+- for (bytes_left = file_size; bytes_left > 0; bytes_left -= bytes_read)
++ while (file_size > 0)
+ {
+ bytes_read = read (in_file_des, buf, BUFSIZ);
+ if (bytes_read < 0)
+ error (PAXEXIT_FAILURE, errno, _("cannot read checksum for %s"), file_name);
+ if (bytes_read == 0)
+ break;
+- if (bytes_left < bytes_read)
+- bytes_read = bytes_left;
+- for (i = 0; i < bytes_read; ++i)
++ for (i = 0; i < bytes_read; i++)
+ crc += buf[i] & 0xff;
++ file_size -= bytes_read;
+ }
+ if (lseek (in_file_des, 0L, SEEK_SET))
+ error (PAXEXIT_FAILURE, errno, _("cannot read checksum for %s"), file_name);
+--
+2.39.2
+
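
The cpio fix above is mostly a type change: the remaining byte count has to live in an off_t so that files over 2 GiB neither truncate the size nor wrap the loop counter. A compilable sketch of the corrected checksum loop, simplified from read_for_checksum() with error reporting trimmed:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/types.h>

    /* The cpio "crc" is a plain byte sum; sizes use off_t, reads use ssize_t. */
    static uint32_t crc_of_fd(int fd, off_t file_size)
    {
        unsigned char buf[8192];
        uint32_t crc = 0;
        while (file_size > 0) {
            ssize_t n = read(fd, buf, sizeof buf);
            if (n <= 0)
                break;                      /* the real code reports read errors */
            for (ssize_t i = 0; i < n; i++)
                crc += buf[i];
            file_size -= n;
        }
        return crc;
    }

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 1;
        int fd = open(argv[1], O_RDONLY);
        if (fd < 0)
            return 1;
        off_t size = lseek(fd, 0, SEEK_END);
        lseek(fd, 0, SEEK_SET);
        printf("byte-sum checksum: %u\n", (unsigned) crc_of_fd(fd, size));
        close(fd);
        return 0;
    }
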
diff --git a/meta/recipes-extended/cpio/cpio-2.13/0004-Fix-appending-to-archives-bigger-than-2G.patch b/meta/recipes-extended/cpio/cpio-2.13/0004-Fix-appending-to-archives-bigger-than-2G.patch
new file mode 100644
index 0000000000..c212bddf7d
--- /dev/null
+++ b/meta/recipes-extended/cpio/cpio-2.13/0004-Fix-appending-to-archives-bigger-than-2G.patch
@@ -0,0 +1,312 @@
+From 8513495ab5cfb63eb7c4c933fdf0b78c6196cd27 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Fri, 28 Apr 2023 15:23:46 +0300
+Subject: [PATCH 4/4] Fix appending to archives bigger than 2G
+
+* src/extern.h (last_header_start): Change type to off_t.
+* src/global.c: Likewise.
+* src/util.c (prepare_append): Use off_t for file offsets.
+
+Upstream-Status: Backport [0987d63384f0419b4b14aecdc6a61729b75ce86a]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ src/extern.h | 11 ++++-----
+ src/global.c | 2 +-
+ src/util.c | 66 ++++++++++++++++++++++++++--------------------------
+ 3 files changed, 39 insertions(+), 40 deletions(-)
+
+diff --git a/src/extern.h b/src/extern.h
+index 11ac6bf..12f14a9 100644
+--- a/src/extern.h
++++ b/src/extern.h
+@@ -67,7 +67,7 @@ extern int ignore_devno_option;
+
+ extern bool to_stdout_option;
+
+-extern int last_header_start;
++extern off_t last_header_start;
+ extern int copy_matching_files;
+ extern int numeric_uid;
+ extern char *pattern_file_name;
+@@ -123,7 +123,7 @@ void field_width_error (const char *filename, const char *fieldname,
+
+ /* copypass.c */
+ void process_copy_pass (void);
+-int link_to_maj_min_ino (char *file_name, int st_dev_maj,
++int link_to_maj_min_ino (char *file_name, int st_dev_maj,
+ int st_dev_min, ino_t st_ino);
+ int link_to_name (char const *link_name, char const *link_target);
+
+@@ -171,7 +171,7 @@ void copy_files_tape_to_disk (int in_des, int out_des, off_t num_bytes);
+ void copy_files_disk_to_tape (int in_des, int out_des, off_t num_bytes, char *filename);
+ void copy_files_disk_to_disk (int in_des, int out_des, off_t num_bytes, char *filename);
+ void warn_if_file_changed (char *file_name, off_t old_file_size,
+- time_t old_file_mtime);
++ time_t old_file_mtime);
+ void create_all_directories (char const *name);
+ void prepare_append (int out_file_des);
+ char *find_inode_file (ino_t node_num,
+@@ -185,7 +185,7 @@ void set_new_media_message (char *message);
+ #ifdef HPUX_CDF
+ char *add_cdf_double_slashes (char *filename);
+ #endif
+-void write_nuls_to_file (off_t num_bytes, int out_des,
++void write_nuls_to_file (off_t num_bytes, int out_des,
+ void (*writer) (char *in_buf,
+ int out_des, off_t num_bytes));
+ #define DISK_IO_BLOCK_SIZE 512
+@@ -229,6 +229,5 @@ void delay_set_stat (char const *file_name, struct stat *st,
+ mode_t invert_permissions);
+ int repair_delayed_set_stat (struct cpio_file_stat *file_hdr);
+ void apply_delayed_set_stat (void);
+-
+-int arf_stores_inode_p (enum archive_format arf);
+
++int arf_stores_inode_p (enum archive_format arf);
+diff --git a/src/global.c b/src/global.c
+index fb3abe9..5c9fc05 100644
+--- a/src/global.c
++++ b/src/global.c
+@@ -114,7 +114,7 @@ int debug_flag = false;
+
+ /* File position of last header read. Only used during -A to determine
+ where the old TRAILER!!! record started. */
+-int last_header_start = 0;
++off_t last_header_start = 0;
+
+ /* With -i; if true, copy only files that match any of the given patterns;
+ if false, copy only files that do not match any of the patterns. (-f) */
+diff --git a/src/util.c b/src/util.c
+index 4421b20..3be89a4 100644
+--- a/src/util.c
++++ b/src/util.c
+@@ -60,8 +60,8 @@ tape_empty_output_buffer (int out_des)
+ static long output_bytes_before_lseek = 0;
+
+ /* Some tape drivers seem to have a signed internal seek pointer and
+- they lose if it overflows and becomes negative (e.g. when writing
+- tapes > 2Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
++ they lose if it overflows and becomes negative (e.g. when writing
++ tapes > 2Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
+ seek pointer and prevent it from overflowing. */
+ if (output_is_special
+ && ( (output_bytes_before_lseek += output_size) >= 1073741824L) )
+@@ -106,7 +106,7 @@ static ssize_t sparse_write (int fildes, char *buf, size_t nbyte, bool flush);
+ descriptor OUT_DES and reset `output_size' and `out_buff'.
+ If `swapping_halfwords' or `swapping_bytes' is set,
+ do the appropriate swapping first. Our callers have
+- to make sure to only set these flags if `output_size'
++ to make sure to only set these flags if `output_size'
+ is appropriate (a multiple of 4 for `swapping_halfwords',
+ 2 for `swapping_bytes'). The fact that DISK_IO_BLOCK_SIZE
+ must always be a multiple of 4 helps us (and our callers)
+@@ -188,8 +188,8 @@ tape_fill_input_buffer (int in_des, int num_bytes)
+ {
+ #ifdef BROKEN_LONG_TAPE_DRIVER
+ /* Some tape drivers seem to have a signed internal seek pointer and
+- they lose if it overflows and becomes negative (e.g. when writing
+- tapes > 4Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
++ they lose if it overflows and becomes negative (e.g. when writing
++ tapes > 4Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
+ seek pointer and prevent it from overflowing. */
+ if (input_is_special
+ && ( (input_bytes_before_lseek += num_bytes) >= 1073741824L) )
+@@ -332,8 +332,8 @@ tape_buffered_peek (char *peek_buf, int in_des, int num_bytes)
+
+ #ifdef BROKEN_LONG_TAPE_DRIVER
+ /* Some tape drivers seem to have a signed internal seek pointer and
+- they lose if it overflows and becomes negative (e.g. when writing
+- tapes > 4Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
++ they lose if it overflows and becomes negative (e.g. when writing
++ tapes > 4Gb). Doing an lseek (des, 0, SEEK_SET) seems to reset the
+ seek pointer and prevent it from overflowing. */
+ if (input_is_special
+ && ( (input_bytes_before_lseek += num_bytes) >= 1073741824L) )
+@@ -404,7 +404,7 @@ tape_toss_input (int in_des, off_t num_bytes)
+
+ if (crc_i_flag && only_verify_crc_flag)
+ {
+- int k;
++ int k;
+ for (k = 0; k < space_left; ++k)
+ crc += in_buff[k] & 0xff;
+ }
+@@ -416,14 +416,14 @@ tape_toss_input (int in_des, off_t num_bytes)
+ }
+
+ void
+-write_nuls_to_file (off_t num_bytes, int out_des,
+- void (*writer) (char *in_buf, int out_des, off_t num_bytes))
++write_nuls_to_file (off_t num_bytes, int out_des,
++ void (*writer) (char *in_buf, int out_des, off_t num_bytes))
+ {
+ off_t blocks;
+ off_t extra_bytes;
+ off_t i;
+ static char zeros_512[512];
+-
++
+ blocks = num_bytes / sizeof zeros_512;
+ extra_bytes = num_bytes % sizeof zeros_512;
+ for (i = 0; i < blocks; ++i)
+@@ -603,7 +603,7 @@ create_all_directories (char const *name)
+ char *dir;
+
+ dir = dir_name (name);
+-
++
+ if (dir == NULL)
+ error (PAXEXIT_FAILURE, 0, _("virtual memory exhausted"));
+
+@@ -637,9 +637,9 @@ create_all_directories (char const *name)
+ void
+ prepare_append (int out_file_des)
+ {
+- int start_of_header;
+- int start_of_block;
+- int useful_bytes_in_block;
++ off_t start_of_header;
++ off_t start_of_block;
++ size_t useful_bytes_in_block;
+ char *tmp_buf;
+
+ start_of_header = last_header_start;
+@@ -697,8 +697,8 @@ inode_val_compare (const void *val1, const void *val2)
+ const struct inode_val *ival1 = val1;
+ const struct inode_val *ival2 = val2;
+ return ival1->inode == ival2->inode
+- && ival1->major_num == ival2->major_num
+- && ival1->minor_num == ival2->minor_num;
++ && ival1->major_num == ival2->major_num
++ && ival1->minor_num == ival2->minor_num;
+ }
+
+ static struct inode_val *
+@@ -706,10 +706,10 @@ find_inode_val (ino_t node_num, unsigned long major_num,
+ unsigned long minor_num)
+ {
+ struct inode_val sample;
+-
++
+ if (!hash_table)
+ return NULL;
+-
++
+ sample.inode = node_num;
+ sample.major_num = major_num;
+ sample.minor_num = minor_num;
+@@ -734,7 +734,7 @@ add_inode (ino_t node_num, char *file_name, unsigned long major_num,
+ {
+ struct inode_val *temp;
+ struct inode_val *e = NULL;
+-
++
+ /* Create new inode record. */
+ temp = (struct inode_val *) xmalloc (sizeof (struct inode_val));
+ temp->inode = node_num;
+@@ -1007,7 +1007,7 @@ buf_all_zeros (char *buf, int bufsize)
+
+ /* Write NBYTE bytes from BUF to file descriptor FILDES, trying to
+ create holes instead of writing blockfuls of zeros.
+-
++
+ Return the number of bytes written (including bytes in zero
+ regions) on success, -1 on error.
+
+@@ -1027,7 +1027,7 @@ sparse_write (int fildes, char *buf, size_t nbytes, bool flush)
+
+ enum { begin, in_zeros, not_in_zeros } state =
+ delayed_seek_count ? in_zeros : begin;
+-
++
+ while (nbytes)
+ {
+ size_t rest = nbytes;
+@@ -1042,7 +1042,7 @@ sparse_write (int fildes, char *buf, size_t nbytes, bool flush)
+ if (state == not_in_zeros)
+ {
+ ssize_t bytes = buf - start_ptr + rest;
+-
++
+ n = write (fildes, start_ptr, bytes);
+ if (n == -1)
+ return -1;
+@@ -1091,8 +1091,8 @@ sparse_write (int fildes, char *buf, size_t nbytes, bool flush)
+ if (n != 1)
+ return n;
+ delayed_seek_count = 0;
+- }
+-
++ }
++
+ return nwritten + seek_count;
+ }
+
+@@ -1222,7 +1222,7 @@ set_perms (int fd, struct cpio_file_stat *header)
+ if (!no_chown_flag)
+ {
+ uid_t uid = CPIO_UID (header->c_uid);
+- gid_t gid = CPIO_GID (header->c_gid);
++ gid_t gid = CPIO_GID (header->c_gid);
+ if ((fchown_or_chown (fd, header->c_name, uid, gid) < 0)
+ && errno != EPERM)
+ chown_error_details (header->c_name, uid, gid);
+@@ -1239,13 +1239,13 @@ set_file_times (int fd,
+ const char *name, unsigned long atime, unsigned long mtime)
+ {
+ struct timespec ts[2];
+-
++
+ memset (&ts, 0, sizeof ts);
+
+ ts[0].tv_sec = atime;
+ ts[1].tv_sec = mtime;
+
+- /* Silently ignore EROFS because reading the file won't have upset its
++ /* Silently ignore EROFS because reading the file won't have upset its
+ timestamp if it's on a read-only filesystem. */
+ if (fdutimens (fd, name, ts) < 0 && errno != EROFS)
+ utime_error (name);
+@@ -1297,7 +1297,7 @@ cpio_safer_name_suffix (char *name, bool link_target, bool absolute_names,
+
+ /* This is a simplified form of delayed set_stat used by GNU tar.
+ With the time, both forms will merge and pass to paxutils
+-
++
+ List of directories whose statuses we need to extract after we've
+ finished extracting their subsidiary files. If you consider each
+ contiguous subsequence of elements of the form [D]?[^D]*, where [D]
+@@ -1415,7 +1415,7 @@ cpio_mkdir (struct cpio_file_stat *file_hdr, int *setstat_delayed)
+ {
+ int rc;
+ mode_t mode = file_hdr->c_mode;
+-
++
+ if (!(file_hdr->c_mode & S_IWUSR))
+ {
+ rc = mkdir (file_hdr->c_name, mode | S_IWUSR);
+@@ -1438,10 +1438,10 @@ cpio_create_dir (struct cpio_file_stat *file_hdr, int existing_dir)
+ {
+ int res; /* Result of various function calls. */
+ int setstat_delayed = 0;
+-
++
+ if (to_stdout_option)
+ return 0;
+-
++
+ /* Strip any trailing `/'s off the filename; tar puts
+ them on. We might as well do it here in case anybody
+ else does too, since they cause strange things to happen. */
+@@ -1530,7 +1530,7 @@ arf_stores_inode_p (enum archive_format arf)
+ }
+ return 1;
+ }
+-
++
+ void
+ cpio_file_stat_init (struct cpio_file_stat *file_hdr)
+ {
+--
+2.39.2
+
diff --git a/meta/recipes-extended/cpio/cpio_2.13.bb b/meta/recipes-extended/cpio/cpio_2.13.bb
index 7c8a465cd0..5ab567f360 100644
--- a/meta/recipes-extended/cpio/cpio_2.13.bb
+++ b/meta/recipes-extended/cpio/cpio_2.13.bb
@@ -10,6 +10,8 @@ SRC_URI = "${GNU_MIRROR}/cpio/cpio-${PV}.tar.gz \
file://0001-Unset-need_charset_alias-when-building-for-musl.patch \
file://0002-src-global.c-Remove-superfluous-declaration-of-progr.patch \
file://CVE-2021-38185.patch \
+ file://0003-Fix-calculation-of-CRC-in-copy-out-mode.patch \
+ file://0004-Fix-appending-to-archives-bigger-than-2G.patch \
"
SRC_URI[md5sum] = "389c5452d667c23b5eceb206f5000810"
diff --git a/meta/recipes-extended/cups/cups.inc b/meta/recipes-extended/cups/cups.inc
index 15f46937e1..6cfe314f20 100644
--- a/meta/recipes-extended/cups/cups.inc
+++ b/meta/recipes-extended/cups/cups.inc
@@ -13,6 +13,11 @@ SRC_URI = "https://github.com/apple/cups/releases/download/v${PV}/${BP}-source.t
file://0002-don-t-try-to-run-generated-binaries.patch \
file://0003-cups_1.4.6.bb-Fix-build-on-ppc64.patch \
file://0004-cups-fix-multilib-install-file-conflicts.patch\
+ file://CVE-2022-26691.patch \
+ file://CVE-2023-32324.patch \
+ file://CVE-2023-34241.patch \
+ file://CVE-2023-32360.patch \
+ file://CVE-2023-4504.patch \
"
UPSTREAM_CHECK_URI = "https://github.com/apple/cups/releases"
@@ -119,4 +124,4 @@ cups_sysroot_preprocess () {
# -25317 concerns /var/log/cups having lp ownership. Our /var/log/cups is
# root:root, so this doesn't apply.
-CVE_CHECK_WHITELIST += "CVE-2021-25317"
\ No newline at end of file
+CVE_CHECK_WHITELIST += "CVE-2021-25317"
diff --git a/meta/recipes-extended/cups/cups/CVE-2022-26691.patch b/meta/recipes-extended/cups/cups/CVE-2022-26691.patch
new file mode 100644
index 0000000000..1fa5a54c70
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2022-26691.patch
@@ -0,0 +1,33 @@
+From de4f8c196106033e4c372dce3e91b9d42b0b9444 Mon Sep 17 00:00:00 2001
+From: Zdenek Dohnal <zdohnal@redhat.com>
+Date: Thu, 26 May 2022 06:27:04 +0200
+Subject: [PATCH] scheduler/cert.c: Fix string comparison (fixes
+ CVE-2022-26691)
+
+The previous algorithm didn't expect the strings can have a different
+length, so one string can be a substring of the other and such substring
+was reported as equal to the longer string.
+
+CVE: CVE-2022-26691
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/de4f8c196106033e4c372dce3e91b9d42b0b9444]
+Signed-off-by: Steve Sakoman
+
+---
+diff --git a/scheduler/cert.c b/scheduler/cert.c
+index b268bf1b2..9b65b96c9 100644
+--- a/scheduler/cert.c
++++ b/scheduler/cert.c
+@@ -434,5 +434,12 @@ ctcompare(const char *a, /* I - First string */
+ b ++;
+ }
+
+- return (result);
++ /*
++ * The while loop finishes when *a == '\0' or *b == '\0'
++ * so after the while loop either both *a and *b == '\0',
++ * or one points inside a string, so when we apply logical OR on *a,
++ * *b and result, we get a non-zero return value if the compared strings don't match.
++ */
++
++ return (result | *a | *b);
+ }
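
The one-line return change above is easy to misread: because the loop stops at the first NUL in either string, a certificate that is a strict prefix of the expected one used to compare as equal. OR-ing the final *a and *b into the result closes that gap. A standalone model of the fixed comparison (case folding omitted; not the full cupsd function):

    #include <stdio.h>

    /* Returns 0 only when both strings match and end together. */
    static int ctcompare_fixed(const char *a, const char *b)
    {
        int result = 0;
        while (*a && *b) {
            result |= *a ^ *b;
            a++;
            b++;
        }
        /* If only one string ended, *a or *b is non-zero and forces a mismatch. */
        return result | *a | *b;
    }

    int main(void)
    {
        printf("%d\n", ctcompare_fixed("secret", "secret"));      /* 0: match     */
        printf("%d\n", ctcompare_fixed("secret", "secret-more")); /* non-zero     */
        return 0;
    }
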
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-32324.patch b/meta/recipes-extended/cups/cups/CVE-2023-32324.patch
new file mode 100644
index 0000000000..40b89c9899
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-32324.patch
@@ -0,0 +1,36 @@
+From 07cbffd11107eed3aaf1c64e35552aec20f792da Mon Sep 17 00:00:00 2001
+From: Zdenek Dohnal <zdohnal@redhat.com>
+Date: Thu, 1 Jun 2023 12:04:00 +0200
+Subject: [PATCH] cups/string.c: Return if `size` is 0 (fixes CVE-2023-32324)
+
+CVE: CVE-2023-32324
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/fd8bc2d32589]
+
+(cherry picked from commit fd8bc2d32589d1fd91fe1c0521be2a7c0462109e)
+Signed-off-by: Sanjay Chitroda <schitrod@cisco.com>
+---
+ cups/string.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/cups/string.c b/cups/string.c
+index 93cdad19..6ef58515 100644
+--- a/cups/string.c
++++ b/cups/string.c
+@@ -1,6 +1,7 @@
+ /*
+ * String functions for CUPS.
+ *
++ * Copyright © 2023 by OpenPrinting.
+ * Copyright © 2007-2019 by Apple Inc.
+ * Copyright © 1997-2007 by Easy Software Products.
+ *
+@@ -730,6 +731,9 @@ _cups_strlcpy(char *dst, /* O - Destination string */
+ size_t srclen; /* Length of source string */
+
+
++ if (size == 0)
++ return (0);
++
+ /*
+ * Figure out how much room is needed...
+ */
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-32360.patch b/meta/recipes-extended/cups/cups/CVE-2023-32360.patch
new file mode 100644
index 0000000000..4d39e1e57f
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-32360.patch
@@ -0,0 +1,31 @@
+From a0c8b9c9556882f00c68b9727a95a1b6d1452913 Mon Sep 17 00:00:00 2001
+From: Michael R Sweet <michael.r.sweet@gmail.com>
+Date: Tue, 6 Dec 2022 09:04:01 -0500
+Subject: [PATCH] Require authentication for CUPS-Get-Document.
+
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/a0c8b9c9556882f00c68b9727a95a1b6d1452913]
+CVE: CVE-2023-32360
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ conf/cupsd.conf.in | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/conf/cupsd.conf.in b/conf/cupsd.conf.in
+index b258849078..a07536f3e4 100644
+--- a/conf/cupsd.conf.in
++++ b/conf/cupsd.conf.in
+@@ -68,7 +68,13 @@ IdleExitTimeout @EXIT_TIMEOUT@
+ Order deny,allow
+ </Limit>
+
+- <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
++ <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job>
++ Require user @OWNER @SYSTEM
++ Order deny,allow
++ </Limit>
++
++ <Limit CUPS-Get-Document>
++ AuthType Default
+ Require user @OWNER @SYSTEM
+ Order deny,allow
+ </Limit>
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-34241.patch b/meta/recipes-extended/cups/cups/CVE-2023-34241.patch
new file mode 100644
index 0000000000..816efc2946
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-34241.patch
@@ -0,0 +1,65 @@
+From ffd290b4ab247f82722927ba9b21358daa16dbf1 Mon Sep 17 00:00:00 2001
+From: Rose <83477269+AtariDreams@users.noreply.github.com>
+Date: Thu, 1 Jun 2023 11:33:39 -0400
+Subject: [PATCH] Log result of httpGetHostname BEFORE closing the connection
+
+httpClose frees the memory of con->http. This is problematic because httpGetHostname then tries to access the memory it points to.
+
+We have to log the hostname first.
+
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/9809947a959e18409dcf562a3466ef246cb90cb2]
+CVE: CVE-2023-34241
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ scheduler/client.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/scheduler/client.c b/scheduler/client.c
+index 91e441188c..327473a4d1 100644
+--- a/scheduler/client.c
++++ b/scheduler/client.c
+@@ -193,13 +193,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+ /*
+ * Can't have an unresolved IP address with double-lookups enabled...
+ */
+-
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+- "Name lookup failed - connection from %s closed!",
++ "Name lookup failed - closing connection from %s!",
+ httpGetHostname(con->http, NULL, 0));
+
++ httpClose(con->http);
+ free(con);
+ return;
+ }
+@@ -235,11 +233,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+ * with double-lookups enabled...
+ */
+
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+- "IP lookup failed - connection from %s closed!",
++ "IP lookup failed - closing connection from %s!",
+ httpGetHostname(con->http, NULL, 0));
++
++ httpClose(con->http);
+ free(con);
+ return;
+ }
+@@ -256,11 +254,11 @@ cupsdAcceptClient(cupsd_listener_t *lis)/* I - Listener socket */
+
+ if (!hosts_access(&wrap_req))
+ {
+- httpClose(con->http);
+-
+ cupsdLogClient(con, CUPSD_LOG_WARN,
+ "Connection from %s refused by /etc/hosts.allow and "
+ "/etc/hosts.deny rules.", httpGetHostname(con->http, NULL, 0));
++
++ httpClose(con->http);
+ free(con);
+ return;
+ }
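
The CVE-2023-34241 change above is purely an ordering fix: httpGetHostname() has to run while con->http is still valid, so the log call moves ahead of httpClose(). The same shape in miniature, with illustrative types rather than the cupsd ones:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct conn { char host[64]; };   /* stand-in for cupsd's client/http pair */

    /* Fixed ordering: read (and log) everything still needed from the
     * connection before it is freed, never after. */
    static void reject_connection(struct conn *c)
    {
        fprintf(stderr, "Name lookup failed - closing connection from %s!\n", c->host);
        free(c);                      /* plays the role of httpClose(con->http) */
    }

    int main(void)
    {
        struct conn *c = calloc(1, sizeof *c);
        if (!c)
            return 1;
        strcpy(c->host, "badhost.example");
        reject_connection(c);
        return 0;
    }
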
diff --git a/meta/recipes-extended/cups/cups/CVE-2023-4504.patch b/meta/recipes-extended/cups/cups/CVE-2023-4504.patch
new file mode 100644
index 0000000000..be0db1fbd4
--- /dev/null
+++ b/meta/recipes-extended/cups/cups/CVE-2023-4504.patch
@@ -0,0 +1,40 @@
+From a9a7daa77699bd58001c25df8a61a8029a217ddf Mon Sep 17 00:00:00 2001
+From: Zdenek Dohnal <zdohnal@redhat.com>
+Date: Fri, 1 Sep 2023 16:47:29 +0200
+Subject: [PATCH] raster-interpret.c: Fix CVE-2023-4504
+
+We didn't check for end of buffer if it looks like there is an escaped
+character - check for NULL terminator there and if found, return NULL
+as return value and in `ptr`, because a lone backslash is not
+a valid PostScript character.
+
+Upstream-Status: Backport [https://github.com/OpenPrinting/cups/commit/2431caddb7e6a87f04ac90b5c6366ad268b6ff31]
+CVE: CVE-2023-4504
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ cups/raster-interpret.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+--- a/cups/raster-interpret.c
++++ b/cups/raster-interpret.c
+@@ -1113,7 +1113,19 @@ scan_ps(_cups_ps_stack_t *st, /* I - S
+
+ cur ++;
+
+- if (*cur == 'b')
++ /*
++ * Return NULL if we reached NULL terminator, a lone backslash
++ * is not a valid character in PostScript.
++ */
++
++ if (!*cur)
++ {
++ *ptr = NULL;
++
++ return (NULL);
++ }
++
++ if (*cur == 'b')
+ *valptr++ = '\b';
+ else if (*cur == 'f')
+ *valptr++ = '\f';
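The rule the fix enforces (stop when the byte after a backslash is the string terminator, because a lone trailing backslash is not a valid escape) can be shown with a small standalone parser; this is illustrative only and is not the CUPS raster code:

    #include <stdio.h>

    /* Illustrative escape handler: returns 1 on success, 0 if *cur is not
     * an escape, and -1 when the backslash is the last byte of the buffer. */
    static int unescape_char(const char **cur, char *out)
    {
        const char *p = *cur;
        if (*p != '\\')
            return 0;
        p++;                      /* step over the backslash */
        if (*p == '\0')           /* lone backslash at end of buffer: reject */
            return -1;
        switch (*p) {
        case 'b': *out = '\b'; break;
        case 'f': *out = '\f'; break;
        case 'n': *out = '\n'; break;
        default:  *out = *p;   break;
        }
        *cur = p + 1;
        return 1;
    }

    int main(void)
    {
        const char *good = "\\n", *bad = "\\";
        char c;
        printf("good: %d\n", unescape_char(&good, &c));  /* prints 1 */
        printf("bad:  %d\n", unescape_char(&bad, &c));   /* prints -1 */
        return 0;
    }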
diff --git a/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch b/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch
new file mode 100644
index 0000000000..c6cba058a7
--- /dev/null
+++ b/meta/recipes-extended/gawk/gawk/CVE-2023-4156.patch
@@ -0,0 +1,28 @@
+From e709eb829448ce040087a3fc5481db6bfcaae212 Mon Sep 17 00:00:00 2001
+From: "Arnold D. Robbins" <arnold@skeeve.com>
+Date: Wed, 3 Aug 2022 13:00:54 +0300
+Subject: [PATCH] Small bug fix in builtin.c.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/gawk/tree/debian/patches/CVE-2023-4156.patch?h=ubuntu/focal-security
+Upstream commit https://git.savannah.gnu.org/gitweb/?p=gawk.git;a=commitdiff;h=e709eb829448ce040087a3fc5481db6bfcaae212]
+CVE: CVE-2023-4156
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ChangeLog | 6 ++++++
+ builtin.c | 5 ++++-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- gawk-5.1.0.orig/builtin.c
++++ gawk-5.1.0/builtin.c
+@@ -957,7 +957,10 @@ check_pos:
+ s1++;
+ n0--;
+ }
+- if (val >= num_args) {
++ // val could be less than zero if someone provides a field width
++ // so large that it causes integer overflow. Mainly fuzzers do this,
++ // but let's try to be good anyway.
++ if (val < 0 || val >= num_args) {
+ toofew = true;
+ break;
+ }
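The guard works because a user-supplied field width large enough to overflow a signed integer wraps to a negative value, so the index check needs a lower bound as well as an upper bound. A standalone sketch of that argument-selection check, with a hypothetical helper rather than gawk's own code:

    #include <stdio.h>

    /* Hypothetical positional-argument lookup: val comes from parsing an
     * untrusted field width, so it may have wrapped negative and must be
     * bounds-checked on both ends before indexing. */
    static const char *pick_arg(long val, const char **args, int num_args)
    {
        if (val < 0 || val >= num_args)
            return NULL;                /* treat as "too few arguments" */
        return args[val];
    }

    int main(void)
    {
        const char *args[] = { "first", "second" };
        const char *a = pick_arg(1, args, 2);
        const char *b = pick_arg(-5, args, 2);  /* e.g. an overflowed width */

        printf("%s\n", a ? a : "(too few)");
        printf("%s\n", b ? b : "(too few)");
        return 0;
    }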
diff --git a/meta/recipes-extended/gawk/gawk/remove-sensitive-tests.patch b/meta/recipes-extended/gawk/gawk/remove-sensitive-tests.patch
new file mode 100644
index 0000000000..167c0787ee
--- /dev/null
+++ b/meta/recipes-extended/gawk/gawk/remove-sensitive-tests.patch
@@ -0,0 +1,24 @@
+These tests require an unloaded host as otherwise timing sensitive tests can fail
+https://bugzilla.yoctoproject.org/show_bug.cgi?id=14371
+
+Upstream-Status: Inappropriate
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+
+--- a/test/Maketests~
++++ b/test/Maketests
+@@ -2069,7 +2069,2 @@
+
+-timeout:
+- @echo $@ $(ZOS_FAIL)
+- @AWKPATH="$(srcdir)" $(AWK) -f $@.awk >_$@ 2>&1 || echo EXIT CODE: $$? >>_$@
+- @-$(CMP) "$(srcdir)"/$@.ok _$@ && rm -f _$@
+-
+ typedregex1:
+@@ -2297,7 +2292,2 @@
+ @-$(CMP) "$(srcdir)"/$@.ok _$@ && rm -f _$@
+-
+-time:
+- @echo $@
+- @AWKPATH="$(srcdir)" $(AWK) -f $@.awk >_$@ 2>&1 || echo EXIT CODE: $$? >>_$@
+- @-$(CMP) "$(srcdir)"/$@.ok _$@ && rm -f _$@
+
diff --git a/meta/recipes-extended/gawk/gawk_5.0.1.bb b/meta/recipes-extended/gawk/gawk_5.0.1.bb
index e79ccfdebf..c71890c19e 100644
--- a/meta/recipes-extended/gawk/gawk_5.0.1.bb
+++ b/meta/recipes-extended/gawk/gawk_5.0.1.bb
@@ -16,7 +16,9 @@ PACKAGECONFIG[readline] = "--with-readline,--without-readline,readline"
PACKAGECONFIG[mpfr] = "--with-mpfr,--without-mpfr, mpfr"
SRC_URI = "${GNU_MIRROR}/gawk/gawk-${PV}.tar.gz \
+ file://remove-sensitive-tests.patch \
file://run-ptest \
+ file://CVE-2023-4156.patch \
"
SRC_URI[md5sum] = "c5441c73cc451764055ee65e9a4292bb"
@@ -41,13 +43,20 @@ inherit ptest
do_install_ptest() {
mkdir ${D}${PTEST_PATH}/test
ln -s ${bindir}/gawk ${D}${PTEST_PATH}/gawk
- for i in `grep -vE "@|^$|#|Gt-dummy" ${S}/test/Maketests |awk -F: '{print $1}'` Maketests inclib.awk; \
- do cp ${S}/test/$i* ${D}${PTEST_PATH}/test; \
+ # The list of tests is all targets in Maketests, apart from the dummy Gt-dummy
+ TESTS=$(awk -F: '$1 == "Gt-dummy" { next } /[[:alnum:]]+:$/ { print $1 }' ${S}/test/Maketests)
+ for i in $TESTS Maketests inclib.awk; do
+ cp ${S}/test/$i* ${D}${PTEST_PATH}/test
done
sed -i -e 's|/usr/local/bin|${bindir}|g' \
-e 's|#!${base_bindir}/awk|#!${bindir}/awk|g' ${D}${PTEST_PATH}/test/*.awk
- sed -i -e "s|GAWKLOCALE|LANG|g" ${D}${PTEST_PATH}/test/Maketests
+ sed -i -e "s|GAWKLOCALE|LANG|g" ${D}${PTEST_PATH}/test/Maketests
+
+ # These tests require an unloaded host as otherwise timing sensitive tests can fail
+ # https://bugzilla.yoctoproject.org/show_bug.cgi?id=14371
+ rm -f ${D}${PTEST_PATH}/test/time.*
+ rm -f ${D}${PTEST_PATH}/test/timeout.*
}
RDEPENDS_${PN}-ptest += "make"
diff --git a/meta/recipes-extended/ghostscript/ghostscript/0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch b/meta/recipes-extended/ghostscript/ghostscript/0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch
new file mode 100644
index 0000000000..91b9f6df50
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch
@@ -0,0 +1,31 @@
+From d81b82c70bc1fb9991bb95f1201abb5dea55f57f Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Mon, 17 Jul 2023 14:06:37 +0100
+Subject: [PATCH] Bug 706897: Copy pcx buffer overrun fix from
+ devices/gdevpcx.c
+
+Bounds check the buffer, before dereferencing the pointer.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=d81b82c70bc1fb9991bb95f1201abb5dea55f57f]
+CVE: CVE-2023-38559
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gdevdevn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/base/gdevdevn.c b/base/gdevdevn.c
+index 3b019d6..2888776 100644
+--- a/base/gdevdevn.c
++++ b/base/gdevdevn.c
+@@ -1980,7 +1980,7 @@ devn_pcx_write_rle(const byte * from, const byte * end, int step, gp_file * file
+ byte data = *from;
+
+ from += step;
+- if (data != *from || from == end) {
++ if (from >= end || data != *from) {
+ if (data >= 0xc0)
+ gp_fputc(0xc1, file);
+ } else {
+--
+2.25.1
+
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-36773.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-36773.patch
new file mode 100644
index 0000000000..ea8bf26f3f
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-36773.patch
@@ -0,0 +1,109 @@
+From 8c7bd787defa071c96289b7da9397f673fddb874 Mon Sep 17 00:00:00 2001
+From: Ken Sharp <ken.sharp@artifex.com>
+Date: Wed, 20 May 2020 16:02:07 +0100
+Subject: [PATCH] txtwrite - address memory problems
+
+Bug #702229 " txtwrite: use after free in 9.51 on some files (regression from 9.50)"
+Also bug #702346 and the earlier report #701877.
+
+The problems occur because it's possible for a single character code in
+a PDF file to map to more than a single Unicode code point. In the case
+of the file for 701877 the character code maps to 'f' and 'i' (it is an
+fi ligature).
+
+The code should deal with this, but we need to ensure we are using the
+correct index. In addition, if we do get more Unicode code points than
+we expected, we need to set the widths of the 'extra' code points to
+zero (we only want to consider the width of the original character).
+
+This does mean increasing the size of the Widths array to cater for
+the possibility of more entries on output than there were on input.
+
+While working on it I noticed that the Unicode remapping on little-
+endian machines was reversing the order of the Unicode values, when
+there was more than a single code point returned, so fixed that at
+the same time.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;h=8c7bd787defa071c96289b7da9397f673fddb874]
+CVE: CVE-2020-36773
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ devices/vector/gdevtxtw.c | 26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+diff --git a/devices/vector/gdevtxtw.c b/devices/vector/gdevtxtw.c
+index 87f9355..bddce5a 100644
+--- a/devices/vector/gdevtxtw.c
++++ b/devices/vector/gdevtxtw.c
+@@ -1812,11 +1812,11 @@ static int get_unicode(textw_text_enum_t *penum, gs_font *font, gs_glyph glyph,
+ #else
+ b = (char *)Buffer;
+ u = (char *)unicode;
+- while (l >= 0) {
+- *b++ = *(u + l);
+- l--;
+- }
+
++ for (l=0;l<length;l+=2, u+=2){
++ *b++ = *(u+1);
++ *b++ = *u;
++ }
+ #endif
+ gs_free_object(penum->dev->memory, unicode, "free temporary unicode buffer");
+ return length / sizeof(short);
+@@ -1963,7 +1963,7 @@ txtwrite_process_plain_text(gs_text_enum_t *pte)
+ &penum->text_state->matrix, &wanted);
+ pte->returned.total_width.x += wanted.x;
+ pte->returned.total_width.y += wanted.y;
+- penum->Widths[pte->index - 1] = wanted.x;
++ penum->Widths[penum->TextBufferIndex] = wanted.x;
+
+ if (pte->text.operation & TEXT_ADD_TO_ALL_WIDTHS) {
+ gs_point tpt;
+@@ -1984,8 +1984,14 @@ txtwrite_process_plain_text(gs_text_enum_t *pte)
+ pte->returned.total_width.x += dpt.x;
+ pte->returned.total_width.y += dpt.y;
+
+- penum->TextBufferIndex += get_unicode(penum, (gs_font *)pte->orig_font, glyph, ch, &penum->TextBuffer[penum->TextBufferIndex]);
+- penum->Widths[pte->index - 1] += dpt.x;
++ penum->Widths[penum->TextBufferIndex] += dpt.x;
++ code = get_unicode(penum, (gs_font *)pte->orig_font, glyph, ch, &penum->TextBuffer[penum->TextBufferIndex]);
++ /* If a single text code returned multiple Unicode values, then we need to set the
++ * 'extra' code points' widths to 0.
++ */
++ if (code > 1)
++ memset(&penum->Widths[penum->TextBufferIndex + 1], 0x00, (code - 1) * sizeof(float));
++ penum->TextBufferIndex += code;
+ }
+ return 0;
+ }
+@@ -2123,7 +2129,7 @@ txt_add_fragment(gx_device_txtwrite_t *tdev, textw_text_enum_t *penum)
+ if (!penum->text_state->Widths)
+ return gs_note_error(gs_error_VMerror);
+ memset(penum->text_state->Widths, 0x00, penum->TextBufferIndex * sizeof(float));
+- memcpy(penum->text_state->Widths, penum->Widths, penum->text.size * sizeof(float));
++ memcpy(penum->text_state->Widths, penum->Widths, penum->TextBufferIndex * sizeof(float));
+
+ unsorted_entry->Unicode_Text = (unsigned short *)gs_malloc(tdev->memory->stable_memory,
+ penum->TextBufferIndex, sizeof(unsigned short), "txtwrite alloc sorted text buffer");
+@@ -2136,7 +2142,7 @@ txt_add_fragment(gx_device_txtwrite_t *tdev, textw_text_enum_t *penum)
+ if (!unsorted_entry->Widths)
+ return gs_note_error(gs_error_VMerror);
+ memset(unsorted_entry->Widths, 0x00, penum->TextBufferIndex * sizeof(float));
+- memcpy(unsorted_entry->Widths, penum->Widths, penum->text.size * sizeof(float));
++ memcpy(unsorted_entry->Widths, penum->Widths, penum->TextBufferIndex * sizeof(float));
+
+ unsorted_entry->FontName = (char *)gs_malloc(tdev->memory->stable_memory,
+ (strlen(penum->text_state->FontName) + 1), sizeof(unsigned char), "txtwrite alloc sorted text buffer");
+@@ -2192,7 +2198,7 @@ textw_text_process(gs_text_enum_t *pte)
+ if (!penum->TextBuffer)
+ return gs_note_error(gs_error_VMerror);
+ penum->Widths = (float *)gs_malloc(tdev->memory->stable_memory,
+- pte->text.size, sizeof(float), "txtwrite temporary widths array");
++ pte->text.size * 4, sizeof(float), "txtwrite temporary widths array");
+ if (!penum->Widths)
+ return gs_note_error(gs_error_VMerror);
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-28879.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-28879.patch
new file mode 100644
index 0000000000..852f2459f7
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-28879.patch
@@ -0,0 +1,54 @@
+From 37ed5022cecd584de868933b5b60da2e995b3179 Mon Sep 17 00:00:00 2001
+From: Ken Sharp <ken.sharp@artifex.com>
+Date: Fri, 24 Mar 2023 13:19:57 +0000
+Subject: [PATCH] Graphics library - prevent buffer overrun in (T)BCP encoding
+
+Bug #706494 "Buffer Overflow in s_xBCPE_process"
+
+As described in detail in the bug report, if the write buffer is filled
+to one byte less than full, and we then try to write an escaped
+character, we overrun the buffer because we don't check before
+writing two bytes to it.
+
+This just checks if we have two bytes before starting to write an
+escaped character and exits if we don't (replacing the consumed byte
+of the input).
+
+Up for further discussion; why do we even permit a BCP encoding filter
+anyway ? I think we should remove this, at least when SAFER is true.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;h=37ed5022cecd584de868933b5b60da2e995b3179]
+CVE: CVE-2023-28879
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/sbcp.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/base/sbcp.c b/base/sbcp.c
+index 6b0383c..90784b5 100644
+--- a/base/sbcp.c
++++ b/base/sbcp.c
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2001-2019 Artifex Software, Inc.
++/* Copyright (C) 2001-2023 Artifex Software, Inc.
+ All Rights Reserved.
+
+ This software is provided AS-IS with no warranty, either express or
+@@ -50,6 +50,14 @@ s_xBCPE_process(stream_state * st, stream_cursor_read * pr,
+ byte ch = *++p;
+
+ if (ch <= 31 && escaped[ch]) {
++ /* Make sure we have space to store two characters in the write buffer,
++ * if we don't then exit without consuming the input character, we'll process
++ * that on the next time round.
++ */
++ if (pw->limit - q < 2) {
++ p--;
++ break;
++ }
+ if (p == rlimit) {
+ p--;
+ break;
+--
+2.25.1
+
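The check being added is a general filter-buffer invariant: a two-byte escape sequence is only started when two output bytes are free; otherwise the input byte is left unconsumed so the next call can retry. A self-contained sketch of that invariant, where the escape marker and transform are made up purely for illustration:

    #include <stdio.h>

    /* Illustrative encoder: writes escaped output into out[], never starting
     * a two-byte sequence unless two bytes of space remain, and reports how
     * much input was consumed so the caller can resume later. */
    static size_t encode(const unsigned char *in, size_t in_len,
                         unsigned char *out, size_t out_len, size_t *consumed)
    {
        size_t o = 0;
        size_t i;
        for (i = 0; i < in_len; i++) {
            unsigned char ch = in[i];
            if (ch <= 31) {                 /* needs escaping: writes 2 bytes */
                if (out_len - o < 2)
                    break;                  /* not enough room, retry later */
                out[o++] = 0x01;            /* made-up escape marker */
                out[o++] = ch ^ 0x40;
            } else {
                if (out_len - o < 1)
                    break;
                out[o++] = ch;
            }
        }
        *consumed = i;
        return o;
    }

    int main(void)
    {
        unsigned char in[] = { 'A', 0x04, 'B' };
        unsigned char out[3];
        size_t consumed;
        size_t n = encode(in, sizeof in, out, sizeof out, &consumed);
        printf("wrote %zu bytes, consumed %zu input bytes\n", n, consumed);
        return 0;
    }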
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-1.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-1.patch
new file mode 100644
index 0000000000..a3bbe958eb
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-1.patch
@@ -0,0 +1,145 @@
+From 5e65eeae225c7d02d447de5abaf4a8e6d234fcea Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Wed, 7 Jun 2023 10:23:06 +0100
+Subject: [PATCH] Bug 706761: Don't "reduce" %pipe% file names for permission validation
+
+For regular file names, we try to simplify relative paths before we use them.
+
+Because the %pipe% device can, effectively, accept command line calls, we
+shouldn't be simplifying that string, because the command line syntax can end
+up confusing the path simplifying code. That can result in permitting a pipe
+command which does not match what was originally permitted.
+
+Special case "%pipe" in the validation code so we always deal with the entire
+string.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=505eab7782b429017eb434b2b95120855f2b0e3c]
+CVE: CVE-2023-36664
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gpmisc.c | 31 +++++++++++++++++++--------
+ base/gslibctx.c | 56 ++++++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 64 insertions(+), 23 deletions(-)
+
+diff --git a/base/gpmisc.c b/base/gpmisc.c
+index c4fffae..09ac6b3 100644
+--- a/base/gpmisc.c
++++ b/base/gpmisc.c
+@@ -1046,16 +1046,29 @@ gp_validate_path_len(const gs_memory_t *mem,
+ && !memcmp(path + cdirstrl, dirsepstr, dirsepstrl)) {
+ prefix_len = 0;
+ }
+- rlen = len+1;
+- bufferfull = (char *)gs_alloc_bytes(mem->thread_safe_memory, rlen + prefix_len, "gp_validate_path");
+- if (bufferfull == NULL)
+- return gs_error_VMerror;
+-
+- buffer = bufferfull + prefix_len;
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
+
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ bufferfull = buffer = (char *)gs_alloc_bytes(mem->thread_safe_memory, len + 1, "gp_validate_path");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len+1;
++ bufferfull = (char *)gs_alloc_bytes(mem->thread_safe_memory, rlen + prefix_len, "gp_validate_path");
++ if (bufferfull == NULL)
++ return gs_error_VMerror;
++
++ buffer = bufferfull + prefix_len;
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+ while (1) {
+ switch (mode[0])
+ {
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index 20c5eee..355c0e3 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -719,14 +719,28 @@ gs_add_control_path_len(const gs_memory_t *mem, gs_path_control_t type, const ch
+ return gs_error_rangecheck;
+ }
+
+- rlen = len+1;
+- buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gp_validate_path");
+- if (buffer == NULL)
+- return gs_error_VMerror;
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_add_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len + 1;
+
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
++ buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gs_add_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+
+ n = control->num;
+ for (i = 0; i < n; i++)
+@@ -802,14 +816,28 @@ gs_remove_control_path_len(const gs_memory_t *mem, gs_path_control_t type, const
+ return gs_error_rangecheck;
+ }
+
+- rlen = len+1;
+- buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gp_validate_path");
+- if (buffer == NULL)
+- return gs_error_VMerror;
++ /* "%pipe%" do not follow the normal rules for path definitions, so we
++ don't "reduce" them to avoid unexpected results
++ */
++ if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_remove_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++ memcpy(buffer, path, len);
++ buffer[len] = 0;
++ rlen = len;
++ }
++ else {
++ rlen = len+1;
+
+- if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
+- return gs_error_invalidfileaccess;
+- buffer[rlen] = 0;
++ buffer = (char *)gs_alloc_bytes(core->memory, rlen, "gs_remove_control_path_len");
++ if (buffer == NULL)
++ return gs_error_VMerror;
++
++ if (gp_file_name_reduce(path, (uint)len, buffer, &rlen) != gp_combine_success)
++ return gs_error_invalidfileaccess;
++ buffer[rlen] = 0;
++ }
+
+ n = control->num;
+ for (i = 0; i < n; i++) {
+--
+2.25.1
+
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-2.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-2.patch
new file mode 100644
index 0000000000..e8c42f1deb
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-2.patch
@@ -0,0 +1,60 @@
+From fb342fdb60391073a69147cb71af1ac416a81099 Mon Sep 17 00:00:00 2001
+From: Chris Liddell <chris.liddell@artifex.com>
+Date: Wed, 14 Jun 2023 09:08:12 +0100
+Subject: [PATCH] Bug 706778: 706761 revisit
+
+Two problems with the original commit. The first was a silly typo inverting the
+logic of a test.
+
+The second was forgetting that we actually validate two candidate
+strings for pipe devices. One with the expected "%pipe%" prefix, the other
+using the pipe character prefix: "|".
+
+This addresses both those.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=fb342fdb60391073a69147cb71af1ac416a81099]
+CVE: CVE-2023-36664
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gpmisc.c | 2 +-
+ base/gslibctx.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/base/gpmisc.c b/base/gpmisc.c
+index 09ac6b3..01d449f 100644
+--- a/base/gpmisc.c
++++ b/base/gpmisc.c
+@@ -1050,7 +1050,7 @@ gp_validate_path_len(const gs_memory_t *mem,
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ bufferfull = buffer = (char *)gs_alloc_bytes(mem->thread_safe_memory, len + 1, "gp_validate_path");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+diff --git a/base/gslibctx.c b/base/gslibctx.c
+index 355c0e3..d8f74a3 100644
+--- a/base/gslibctx.c
++++ b/base/gslibctx.c
+@@ -722,7 +722,7 @@ gs_add_control_path_len(const gs_memory_t *mem, gs_path_control_t type, const ch
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_add_control_path_len");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+@@ -819,7 +819,7 @@ gs_remove_control_path_len(const gs_memory_t *mem, gs_path_control_t type, const
+ /* "%pipe%" do not follow the normal rules for path definitions, so we
+ don't "reduce" them to avoid unexpected results
+ */
+- if (len > 5 && memcmp(path, "%pipe", 5) != 0) {
++ if (path[0] == '|' || (len > 5 && memcmp(path, "%pipe", 5) == 0)) {
+ buffer = (char *)gs_alloc_bytes(core->memory, len + 1, "gs_remove_control_path_len");
+ if (buffer == NULL)
+ return gs_error_VMerror;
+--
+2.25.1
+
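Taken together, the two commits make the validation code treat anything starting with '|' or the "%pipe" prefix as a pipe specification and skip path reduction for it. A standalone sketch of just that classification step, without the Ghostscript allocator and validation plumbing:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative version of the prefix test the patches introduce: pipe
     * specifications are validated verbatim instead of being "reduced". */
    static bool is_pipe_path(const char *path, size_t len)
    {
        return (len > 0 && path[0] == '|') ||
               (len > 5 && memcmp(path, "%pipe", 5) == 0);
    }

    int main(void)
    {
        const char *samples[] = { "%pipe%lpr", "|lpr", "../output.pdf" };
        for (size_t i = 0; i < 3; i++)
            printf("%-15s -> %s\n", samples[i],
                   is_pipe_path(samples[i], strlen(samples[i]))
                       ? "pipe (validate verbatim)"
                       : "regular (reduce path)");
        return 0;
    }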
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-pre1.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-pre1.patch
new file mode 100644
index 0000000000..662736bb3d
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-36664-pre1.patch
@@ -0,0 +1,62 @@
+From 4ceaf92815302863a8c86fcfcf2347e0118dd3a5 Mon Sep 17 00:00:00 2001
+From: Ray Johnston <ray.johnston@artifex.com>
+Date: Tue, 22 Sep 2020 13:10:04 -0700
+Subject: [PATCH] Fix gp_file allocations to use thread_safe_memory.
+
+The gpmisc.c does allocations for gp_file objects and buffers used by
+gp_fprintf, as well as gp_validate_path_len. The helgrind run with
+-dBGPrint -dNumRenderingThreads=4 and PCL input showed up the gp_fprintf
+problem since the clist rendering would call gp_fprintf using the same
+allocator (PCL's chunk allocator which is non_gc_memory). The chunk
+allocator is intentionally not thread safe (for performance).
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=4ceaf92815302863a8c86fcfcf2347e0118dd3a5]
+CVE: CVE-2023-36664 #Dependency Patch1
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ base/gpmisc.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/base/gpmisc.c b/base/gpmisc.c
+index 34cd71f..c4fffae 100644
+--- a/base/gpmisc.c
++++ b/base/gpmisc.c
+@@ -435,7 +435,7 @@ generic_pwrite(gp_file *f, size_t count, gs_offset_t offset, const void *buf)
+
+ gp_file *gp_file_alloc(gs_memory_t *mem, const gp_file_ops_t *prototype, size_t size, const char *cname)
+ {
+- gp_file *file = (gp_file *)gs_alloc_bytes(mem->non_gc_memory, size, cname ? cname : "gp_file");
++ gp_file *file = (gp_file *)gs_alloc_bytes(mem->thread_safe_memory, size, cname ? cname : "gp_file");
+ if (file == NULL)
+ return NULL;
+
+@@ -449,7 +449,7 @@ gp_file *gp_file_alloc(gs_memory_t *mem, const gp_file_ops_t *prototype, size_t
+ memset(((char *)file)+sizeof(*prototype),
+ 0,
+ size - sizeof(*prototype));
+- file->memory = mem->non_gc_memory;
++ file->memory = mem->thread_safe_memory;
+
+ return file;
+ }
+@@ -1047,7 +1047,7 @@ gp_validate_path_len(const gs_memory_t *mem,
+ prefix_len = 0;
+ }
+ rlen = len+1;
+- bufferfull = (char *)gs_alloc_bytes(mem->non_gc_memory, rlen + prefix_len, "gp_validate_path");
++ bufferfull = (char *)gs_alloc_bytes(mem->thread_safe_memory, rlen + prefix_len, "gp_validate_path");
+ if (bufferfull == NULL)
+ return gs_error_VMerror;
+
+@@ -1093,7 +1093,7 @@ gp_validate_path_len(const gs_memory_t *mem,
+ break;
+ }
+
+- gs_free_object(mem->non_gc_memory, bufferfull, "gp_validate_path");
++ gs_free_object(mem->thread_safe_memory, bufferfull, "gp_validate_path");
+ #ifdef EACCES
+ if (code == gs_error_invalidfileaccess)
+ errno = EACCES;
+--
+2.25.1
+
diff --git a/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch
new file mode 100644
index 0000000000..3acb8a503c
--- /dev/null
+++ b/meta/recipes-extended/ghostscript/ghostscript/CVE-2023-43115.patch
@@ -0,0 +1,62 @@
+From 8b0f20002536867bd73ff4552408a72597190cbe Mon Sep 17 00:00:00 2001
+From: Ken Sharp <ken.sharp@artifex.com>
+Date: Thu, 24 Aug 2023 15:24:35 +0100
+Subject: [PATCH] IJS device - try and secure the IJS server startup
+
+Bug #707051 ""ijs" device can execute arbitrary commands"
+
+The problem is that the 'IJS' device needs to start the IJS server, and
+that is indeed an arbitrary command line. There is (apparently) no way
+to validate it. Indeed, this is covered quite clearly in the comments
+at the start of the source:
+
+ * WARNING: The ijs server can be selected on the gs command line
+ * which is a security risk, since any program can be run.
+
+Previously this used the awful LockSafetyParams hackery, which we
+abandoned some time ago because it simply couldn't be made secure (it
+was implemented in PostScript and was therefore vulnerable to PostScript
+programs).
+
+This commit prevents PostScript programs switching to the IJS device
+after SAFER has been activated, and prevents changes to the IjsServer
+parameter after SAFER has been activated.
+
+SAFER is activated, unless explicitly disabled, before any user
+PostScript is executed which means that the device and the server
+invocation can only be configured on the command line. This does at
+least provide minimal security against malicious PostScript programs.
+
+Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=e59216049cac290fb437a04c4f41ea46826cfba5]
+CVE: CVE-2023-43115
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ devices/gdevijs.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/devices/gdevijs.c b/devices/gdevijs.c
+index 3d337c5..e50d69f 100644
+--- a/devices/gdevijs.c
++++ b/devices/gdevijs.c
+@@ -934,6 +934,9 @@ gsijs_finish_copydevice(gx_device *dev, const gx_device *from_dev)
+ static const char rgb[] = "DeviceRGB";
+ gx_device_ijs *ijsdev = (gx_device_ijs *)dev;
+
++ if (ijsdev->memory->gs_lib_ctx->core->path_control_active)
++ return_error(gs_error_invalidaccess);
++
+ code = gx_default_finish_copydevice(dev, from_dev);
+ if(code < 0)
+ return code;
+@@ -1363,7 +1366,7 @@ gsijs_put_params(gx_device *dev, gs_param_list *plist)
+ if (code >= 0)
+ code = gsijs_read_string(plist, "IjsServer",
+ ijsdev->IjsServer, sizeof(ijsdev->IjsServer),
+- dev->LockSafetyParams, is_open);
++ ijsdev->memory->gs_lib_ctx->core->path_control_active, is_open);
+
+ if (code >= 0)
+ code = gsijs_read_string_malloc(plist, "DeviceManufacturer",
+--
+2.25.1
+
diff --git a/meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch b/meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch
index 722bab4ddb..77eec7d158 100644
--- a/meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch
+++ b/meta/recipes-extended/ghostscript/ghostscript/check-stack-limits-after-function-evalution.patch
@@ -14,7 +14,7 @@ stack than are available.
To cope, add in stack limit checking to throw an appropriate error when this
happens.
-
+CVE: CVE-2021-45944
Upstream-Status: Backported [https://git.ghostscript.com/?p=ghostpdl.git;a=patch;h=7861fcad13c497728189feafb41cd57b5b50ea25]
Signed-off-by: Minjae Kim <flowergom@gmail.com>
---
diff --git a/meta/recipes-extended/ghostscript/ghostscript_9.52.bb b/meta/recipes-extended/ghostscript/ghostscript_9.52.bb
index a829d4b4ae..e57f592892 100644
--- a/meta/recipes-extended/ghostscript/ghostscript_9.52.bb
+++ b/meta/recipes-extended/ghostscript/ghostscript_9.52.bb
@@ -39,6 +39,13 @@ SRC_URI_BASE = "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/d
file://CVE-2021-3781_1.patch \
file://CVE-2021-3781_2.patch \
file://CVE-2021-3781_3.patch \
+ file://CVE-2023-28879.patch \
+ file://0001-Bug-706897-Copy-pcx-buffer-overrun-fix-from-devices-.patch \
+ file://CVE-2023-36664-pre1.patch \
+ file://CVE-2023-36664-1.patch \
+ file://CVE-2023-36664-2.patch \
+ file://CVE-2023-43115.patch \
+ file://CVE-2020-36773.patch \
"
SRC_URI = "${SRC_URI_BASE} \
diff --git a/meta/recipes-extended/less/less/CVE-2022-48624.patch b/meta/recipes-extended/less/less/CVE-2022-48624.patch
new file mode 100644
index 0000000000..409730bd4f
--- /dev/null
+++ b/meta/recipes-extended/less/less/CVE-2022-48624.patch
@@ -0,0 +1,41 @@
+From c6ac6de49698be84d264a0c4c0c40bb870b10144 Mon Sep 17 00:00:00 2001
+From: Mark Nudelman <markn@greenwoodsoftware.com>
+Date: Sat, 25 Jun 2022 11:54:43 -0700
+Subject: [PATCH] Shell-quote filenames when invoking LESSCLOSE.
+
+Upstream-Status: Backport [https://github.com/gwsw/less/commit/c6ac6de49698be84d264a0c4c0c40bb870b10144]
+CVE: CVE-2022-48624
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ filename.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/filename.c b/filename.c
+index 5824e385..dff20c08 100644
+--- a/filename.c
++++ b/filename.c
+@@ -972,6 +972,8 @@ close_altfile(altfilename, filename)
+ {
+ #if HAVE_POPEN
+ char *lessclose;
++ char *qfilename;
++ char *qaltfilename;
+ FILE *fd;
+ char *cmd;
+ int len;
+@@ -986,9 +988,13 @@ close_altfile(altfilename, filename)
+ error("LESSCLOSE ignored; must contain no more than 2 %%s", NULL_PARG);
+ return;
+ }
+- len = (int) (strlen(lessclose) + strlen(filename) + strlen(altfilename) + 2);
++ qfilename = shell_quote(filename);
++ qaltfilename = shell_quote(altfilename);
++ len = (int) (strlen(lessclose) + strlen(qfilename) + strlen(qaltfilename) + 2);
+ cmd = (char *) ecalloc(len, sizeof(char));
+- SNPRINTF2(cmd, len, lessclose, filename, altfilename);
++ SNPRINTF2(cmd, len, lessclose, qfilename, qaltfilename);
++ free(qaltfilename);
++ free(qfilename);
+ fd = shellcmd(cmd);
+ free(cmd);
+ if (fd != NULL)
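The fix relies on less's shell_quote() helper; the underlying idea is standard POSIX single-quoting, where an embedded quote is written as '\'' so the filename always stays a single shell word. A simplified standalone sketch of such a quoting helper (not the function less actually ships):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative single-quote wrapper for shell arguments. */
    static char *quote_for_shell(const char *s)
    {
        /* worst case every char expands to 4 bytes, plus 2 quotes and NUL */
        char *out = malloc(strlen(s) * 4 + 3);
        char *p = out;
        if (!out)
            return NULL;
        *p++ = '\'';
        for (; *s; s++) {
            if (*s == '\'') {           /* close, escape, reopen: '\'' */
                memcpy(p, "'\\''", 4);
                p += 4;
            } else {
                *p++ = *s;
            }
        }
        *p++ = '\'';
        *p = '\0';
        return out;
    }

    int main(void)
    {
        char *q = quote_for_shell("it's; rm -rf $HOME");
        if (q) {
            printf("lessclose-handler %s\n", q);   /* stays one argument */
            free(q);
        }
        return 0;
    }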
diff --git a/meta/recipes-extended/less/less_551.bb b/meta/recipes-extended/less/less_551.bb
index a818c68fc7..401f40bed5 100644
--- a/meta/recipes-extended/less/less_551.bb
+++ b/meta/recipes-extended/less/less_551.bb
@@ -26,6 +26,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504 \
DEPENDS = "ncurses"
SRC_URI = "http://www.greenwoodsoftware.com/${BPN}/${BPN}-${PV}.tar.gz \
+ file://CVE-2022-48624.patch \
"
SRC_URI[md5sum] = "4ad4408b06d7a6626a055cb453f36819"
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-23177.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-23177.patch
new file mode 100644
index 0000000000..555c7a47f7
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-23177.patch
@@ -0,0 +1,183 @@
+Description: Fix handling of symbolic link ACLs
+ Published as CVE-2021-23177
+Origin: upstream, https://github.com/libarchive/libarchive/commit/fba4f123cc456d2b2538f811bb831483bf336bad
+Bug-Debian: https://bugs.debian.org/1001986
+Author: Martin Matuska <martin@matuska.org>
+Last-Updated: 2021-12-20
+
+CVE: CVE-2021-23177
+Upstream-Status: Backport [http://deb.debian.org/debian/pool/main/liba/libarchive/libarchive_3.4.3-2+deb11u1.debian.tar.xz]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+--- a/libarchive/archive_disk_acl_freebsd.c
++++ b/libarchive/archive_disk_acl_freebsd.c
+@@ -319,7 +319,7 @@
+
+ static int
+ set_acl(struct archive *a, int fd, const char *name,
+- struct archive_acl *abstract_acl,
++ struct archive_acl *abstract_acl, __LA_MODE_T mode,
+ int ae_requested_type, const char *tname)
+ {
+ int acl_type = 0;
+@@ -364,6 +364,13 @@
+ return (ARCHIVE_FAILED);
+ }
+
++ if (acl_type == ACL_TYPE_DEFAULT && !S_ISDIR(mode)) {
++ errno = EINVAL;
++ archive_set_error(a, errno,
++ "Cannot set default ACL on non-directory");
++ return (ARCHIVE_WARN);
++ }
++
+ acl = acl_init(entries);
+ if (acl == (acl_t)NULL) {
+ archive_set_error(a, errno,
+@@ -542,7 +549,10 @@
+ else if (acl_set_link_np(name, acl_type, acl) != 0)
+ #else
+ /* FreeBSD older than 8.0 */
+- else if (acl_set_file(name, acl_type, acl) != 0)
++ else if (S_ISLNK(mode)) {
++ /* acl_set_file() follows symbolic links, skip */
++ ret = ARCHIVE_OK;
++ } else if (acl_set_file(name, acl_type, acl) != 0)
+ #endif
+ {
+ if (errno == EOPNOTSUPP) {
+@@ -677,14 +687,14 @@
+ & ARCHIVE_ENTRY_ACL_TYPE_POSIX1E) != 0) {
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_ACCESS) != 0) {
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_ACCESS, "access");
+ if (ret != ARCHIVE_OK)
+ return (ret);
+ }
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_DEFAULT) != 0)
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_DEFAULT, "default");
+
+ /* Simultaneous POSIX.1e and NFSv4 is not supported */
+@@ -693,7 +703,7 @@
+ #if ARCHIVE_ACL_FREEBSD_NFS4
+ else if ((archive_acl_types(abstract_acl) &
+ ARCHIVE_ENTRY_ACL_TYPE_NFS4) != 0) {
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_NFS4, "nfs4");
+ }
+ #endif
+--- a/libarchive/archive_disk_acl_linux.c
++++ b/libarchive/archive_disk_acl_linux.c
+@@ -343,6 +343,11 @@
+ return (ARCHIVE_FAILED);
+ }
+
++ if (S_ISLNK(mode)) {
++ /* Linux does not support RichACLs on symbolic links */
++ return (ARCHIVE_OK);
++ }
++
+ richacl = richacl_alloc(entries);
+ if (richacl == NULL) {
+ archive_set_error(a, errno,
+@@ -455,7 +460,7 @@
+ #if ARCHIVE_ACL_LIBACL
+ static int
+ set_acl(struct archive *a, int fd, const char *name,
+- struct archive_acl *abstract_acl,
++ struct archive_acl *abstract_acl, __LA_MODE_T mode,
+ int ae_requested_type, const char *tname)
+ {
+ int acl_type = 0;
+@@ -488,6 +493,18 @@
+ return (ARCHIVE_FAILED);
+ }
+
++ if (S_ISLNK(mode)) {
++ /* Linux does not support ACLs on symbolic links */
++ return (ARCHIVE_OK);
++ }
++
++ if (acl_type == ACL_TYPE_DEFAULT && !S_ISDIR(mode)) {
++ errno = EINVAL;
++ archive_set_error(a, errno,
++ "Cannot set default ACL on non-directory");
++ return (ARCHIVE_WARN);
++ }
++
+ acl = acl_init(entries);
+ if (acl == (acl_t)NULL) {
+ archive_set_error(a, errno,
+@@ -727,14 +744,14 @@
+ & ARCHIVE_ENTRY_ACL_TYPE_POSIX1E) != 0) {
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_ACCESS) != 0) {
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_ACCESS, "access");
+ if (ret != ARCHIVE_OK)
+ return (ret);
+ }
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_DEFAULT) != 0)
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_DEFAULT, "default");
+ }
+ #endif /* ARCHIVE_ACL_LIBACL */
+--- a/libarchive/archive_disk_acl_sunos.c
++++ b/libarchive/archive_disk_acl_sunos.c
+@@ -443,7 +443,7 @@
+
+ static int
+ set_acl(struct archive *a, int fd, const char *name,
+- struct archive_acl *abstract_acl,
++ struct archive_acl *abstract_acl, __LA_MODE_T mode,
+ int ae_requested_type, const char *tname)
+ {
+ aclent_t *aclent;
+@@ -467,7 +467,6 @@
+ if (entries == 0)
+ return (ARCHIVE_OK);
+
+-
+ switch (ae_requested_type) {
+ case ARCHIVE_ENTRY_ACL_TYPE_POSIX1E:
+ cmd = SETACL;
+@@ -492,6 +491,12 @@
+ return (ARCHIVE_FAILED);
+ }
+
++ if (S_ISLNK(mode)) {
++ /* Skip ACLs on symbolic links */
++ ret = ARCHIVE_OK;
++ goto exit_free;
++ }
++
+ e = 0;
+
+ while (archive_acl_next(a, abstract_acl, ae_requested_type, &ae_type,
+@@ -801,7 +806,7 @@
+ if ((archive_acl_types(abstract_acl)
+ & ARCHIVE_ENTRY_ACL_TYPE_POSIX1E) != 0) {
+ /* Solaris writes POSIX.1e access and default ACLs together */
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_POSIX1E, "posix1e");
+
+ /* Simultaneous POSIX.1e and NFSv4 is not supported */
+@@ -810,7 +815,7 @@
+ #if ARCHIVE_ACL_SUNOS_NFS4
+ else if ((archive_acl_types(abstract_acl) &
+ ARCHIVE_ENTRY_ACL_TYPE_NFS4) != 0) {
+- ret = set_acl(a, fd, name, abstract_acl,
++ ret = set_acl(a, fd, name, abstract_acl, mode,
+ ARCHIVE_ENTRY_ACL_TYPE_NFS4, "nfs4");
+ }
+ #endif
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-01.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-01.patch
new file mode 100644
index 0000000000..c4a2fb612c
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-01.patch
@@ -0,0 +1,23 @@
+Description: Never follow symlinks when setting file flags on Linux
+ Published as CVE-2021-31566
+Origin: upstream, https://github.com/libarchive/libarchive/commit/e2ad1a2c3064fa9eba6274b3641c4c1beed25c0b
+Bug-Debian: https://bugs.debian.org/1001990
+Author: Martin Matuska <martin@matuska.org>
+Last-Update: 2021-12-20
+
+CVE: CVE-2021-31566
+Upstream-Status: Backport [http://deb.debian.org/debian/pool/main/liba/libarchive/libarchive_3.4.3-2+deb11u1.debian.tar.xz]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+--- a/libarchive/archive_write_disk_posix.c
++++ b/libarchive/archive_write_disk_posix.c
+@@ -3927,7 +3927,8 @@
+
+ /* If we weren't given an fd, open it ourselves. */
+ if (myfd < 0) {
+- myfd = open(name, O_RDONLY | O_NONBLOCK | O_BINARY | O_CLOEXEC);
++ myfd = open(name, O_RDONLY | O_NONBLOCK | O_BINARY |
++ O_CLOEXEC | O_NOFOLLOW);
+ __archive_ensure_cloexec_flag(myfd);
+ }
+ if (myfd < 0)
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-02.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-02.patch
new file mode 100644
index 0000000000..0dfcd1ac5c
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2021-31566-02.patch
@@ -0,0 +1,172 @@
+Description: Do not follow symlinks when processing the fixup list
+ Published as CVE-2021-31566
+Origin: upstream, https://github.com/libarchive/libarchive/commit/b41daecb5ccb4c8e3b2c53fd6147109fc12c3043
+Bug-Debian: https://bugs.debian.org/1001990
+Author: Martin Matuska <martin@matuska.org>
+Last-Update: 2021-12-20
+
+CVE: CVE-2021-31566
+Upstream-Status: Backport [http://deb.debian.org/debian/pool/main/liba/libarchive/libarchive_3.4.3-2+deb11u1.debian.tar.xz]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -556,6 +556,7 @@
+ libarchive/test/test_write_disk.c \
+ libarchive/test/test_write_disk_appledouble.c \
+ libarchive/test/test_write_disk_failures.c \
++ libarchive/test/test_write_disk_fixup.c \
+ libarchive/test/test_write_disk_hardlink.c \
+ libarchive/test/test_write_disk_hfs_compression.c \
+ libarchive/test/test_write_disk_lookup.c \
+--- a/libarchive/archive_write_disk_posix.c
++++ b/libarchive/archive_write_disk_posix.c
+@@ -2461,6 +2461,7 @@
+ {
+ struct archive_write_disk *a = (struct archive_write_disk *)_a;
+ struct fixup_entry *next, *p;
++ struct stat st;
+ int fd, ret;
+
+ archive_check_magic(&a->archive, ARCHIVE_WRITE_DISK_MAGIC,
+@@ -2478,6 +2479,20 @@
+ (TODO_TIMES | TODO_MODE_BASE | TODO_ACLS | TODO_FFLAGS)) {
+ fd = open(p->name,
+ O_WRONLY | O_BINARY | O_NOFOLLOW | O_CLOEXEC);
++ if (fd == -1) {
++ /* If we cannot lstat, skip entry */
++ if (lstat(p->name, &st) != 0)
++ goto skip_fixup_entry;
++ /*
++ * If we deal with a symbolic link, mark
++ * it in the fixup mode to ensure no
++ * modifications are made to its target.
++ */
++ if (S_ISLNK(st.st_mode)) {
++ p->mode &= ~S_IFMT;
++ p->mode |= S_IFLNK;
++ }
++ }
+ }
+ if (p->fixup & TODO_TIMES) {
+ set_times(a, fd, p->mode, p->name,
+@@ -2492,7 +2507,12 @@
+ fchmod(fd, p->mode);
+ else
+ #endif
+- chmod(p->name, p->mode);
++#ifdef HAVE_LCHMOD
++ lchmod(p->name, p->mode);
++#else
++ if (!S_ISLNK(p->mode))
++ chmod(p->name, p->mode);
++#endif
+ }
+ if (p->fixup & TODO_ACLS)
+ archive_write_disk_set_acls(&a->archive, fd,
+@@ -2503,6 +2523,7 @@
+ if (p->fixup & TODO_MAC_METADATA)
+ set_mac_metadata(a, p->name, p->mac_metadata,
+ p->mac_metadata_size);
++skip_fixup_entry:
+ next = p->next;
+ archive_acl_clear(&p->acl);
+ free(p->mac_metadata);
+@@ -2643,6 +2664,7 @@
+ fe->next = a->fixup_list;
+ a->fixup_list = fe;
+ fe->fixup = 0;
++ fe->mode = 0;
+ fe->name = strdup(pathname);
+ return (fe);
+ }
+--- a/libarchive/test/CMakeLists.txt
++++ b/libarchive/test/CMakeLists.txt
+@@ -208,6 +208,7 @@
+ test_write_disk.c
+ test_write_disk_appledouble.c
+ test_write_disk_failures.c
++ test_write_disk_fixup.c
+ test_write_disk_hardlink.c
+ test_write_disk_hfs_compression.c
+ test_write_disk_lookup.c
+--- /dev/null
++++ b/libarchive/test/test_write_disk_fixup.c
+@@ -0,0 +1,77 @@
++/*-
++ * Copyright (c) 2021 Martin Matuska
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include "test.h"
++
++/*
++ * Test fixup entries don't follow symlinks
++ */
++DEFINE_TEST(test_write_disk_fixup)
++{
++ struct archive *ad;
++ struct archive_entry *ae;
++ int r;
++
++ if (!canSymlink()) {
++ skipping("Symlinks not supported");
++ return;
++ }
++
++ /* Write entries to disk. */
++ assert((ad = archive_write_disk_new()) != NULL);
++
++ /*
++ * Create a file
++ */
++ assertMakeFile("victim", 0600, "a");
++
++ /*
++ * Create a directory and a symlink with the same name
++ */
++
++ /* Directory: dir */
++ assert((ae = archive_entry_new()) != NULL);
++ archive_entry_copy_pathname(ae, "dir");
++ archive_entry_set_mode(ae, AE_IFDIR | 0606);
++ assertEqualIntA(ad, 0, archive_write_header(ad, ae));
++ assertEqualIntA(ad, 0, archive_write_finish_entry(ad));
++ archive_entry_free(ae);
++
++ /* Symbolic Link: dir -> foo */
++ assert((ae = archive_entry_new()) != NULL);
++ archive_entry_copy_pathname(ae, "dir");
++ archive_entry_set_mode(ae, AE_IFLNK | 0777);
++ archive_entry_set_size(ae, 0);
++ archive_entry_copy_symlink(ae, "victim");
++ assertEqualIntA(ad, 0, r = archive_write_header(ad, ae));
++ if (r >= ARCHIVE_WARN)
++ assertEqualIntA(ad, 0, archive_write_finish_entry(ad));
++ archive_entry_free(ae);
++
++ assertEqualInt(ARCHIVE_OK, archive_write_free(ad));
++
++ /* Test the entries on disk. */
++ assertIsSymlink("dir", "victim", 0);
++ assertFileMode("victim", 0600);
++}
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2022-26280.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2022-26280.patch
new file mode 100644
index 0000000000..501fcc5848
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2022-26280.patch
@@ -0,0 +1,29 @@
+From cfaa28168a07ea4a53276b63068f94fce37d6aff Mon Sep 17 00:00:00 2001
+From: Tim Kientzle <kientzle@acm.org>
+Date: Thu, 24 Mar 2022 10:35:00 +0100
+Subject: [PATCH] ZIP reader: fix possible out-of-bounds read in
+ zipx_lzma_alone_init()
+
+Fixes #1672
+
+CVE: CVE-2022-26280
+Upstream-Status: Backport [https://github.com/libarchive/libarchive/commit/cfaa28168a07ea4a53276b63068f94fce37d6aff]
+Signed-off-by: Andrej Valek <andrej.valek@siemens.com>
+
+---
+ libarchive/archive_read_support_format_zip.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/libarchive/archive_read_support_format_zip.c b/libarchive/archive_read_support_format_zip.c
+index 38ada70b5..9d6c900b2 100644
+--- a/libarchive/archive_read_support_format_zip.c
++++ b/libarchive/archive_read_support_format_zip.c
+@@ -1667,7 +1667,7 @@ zipx_lzma_alone_init(struct archive_read *a, struct zip *zip)
+ */
+
+ /* Read magic1,magic2,lzma_params from the ZIPX stream. */
+- if((p = __archive_read_ahead(a, 9, NULL)) == NULL) {
++ if(zip->entry_bytes_remaining < 9 || (p = __archive_read_ahead(a, 9, NULL)) == NULL) {
+ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
+ "Truncated lzma data");
+ return (ARCHIVE_FATAL);
diff --git a/meta/recipes-extended/libarchive/libarchive/CVE-2022-36227.patch b/meta/recipes-extended/libarchive/libarchive/CVE-2022-36227.patch
new file mode 100644
index 0000000000..980a0e884a
--- /dev/null
+++ b/meta/recipes-extended/libarchive/libarchive/CVE-2022-36227.patch
@@ -0,0 +1,43 @@
+From 6311080bff566fcc5591dadfd78efb41705b717f Mon Sep 17 00:00:00 2001
+From: obiwac <obiwac@gmail.com>
+Date: Fri, 22 Jul 2022 22:41:10 +0200
+Subject: [PATCH] CVE-2022-36227
+
+libarchive: CVE-2022-36227 Handle a `calloc` returning NULL (fixes #1754)
+
+Upstream-Status: Backport [https://github.com/libarchive/libarchive/commit/bff38efe8c110469c5080d387bec62a6ca15b1a5]
+CVE: CVE-2022-36227
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libarchive/archive_write.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/libarchive/archive_write.c b/libarchive/archive_write.c
+index 98a55fb..7fe88b6 100644
+--- a/libarchive/archive_write.c
++++ b/libarchive/archive_write.c
+@@ -211,6 +211,10 @@ __archive_write_allocate_filter(struct archive *_a)
+ struct archive_write_filter *f;
+
+ f = calloc(1, sizeof(*f));
++
++ if (f == NULL)
++ return (NULL);
++
+ f->archive = _a;
+ f->state = ARCHIVE_WRITE_FILTER_STATE_NEW;
+ if (a->filter_first == NULL)
+@@ -527,6 +531,10 @@ archive_write_open(struct archive *_a, void *client_data,
+ a->client_data = client_data;
+
+ client_filter = __archive_write_allocate_filter(_a);
++
++ if (client_filter == NULL)
++ return (ARCHIVE_FATAL);
++
+ client_filter->open = archive_write_client_open;
+ client_filter->write = archive_write_client_write;
+ client_filter->close = archive_write_client_close;
+--
+2.25.1
+
diff --git a/meta/recipes-extended/libarchive/libarchive_3.4.2.bb b/meta/recipes-extended/libarchive/libarchive_3.4.2.bb
index b7426a1be8..728eedc401 100644
--- a/meta/recipes-extended/libarchive/libarchive_3.4.2.bb
+++ b/meta/recipes-extended/libarchive/libarchive_3.4.2.bb
@@ -36,11 +36,19 @@ SRC_URI = "http://libarchive.org/downloads/libarchive-${PV}.tar.gz \
file://CVE-2021-36976-1.patch \
file://CVE-2021-36976-2.patch \
file://CVE-2021-36976-3.patch \
+ file://CVE-2021-23177.patch \
+ file://CVE-2021-31566-01.patch \
+ file://CVE-2021-31566-02.patch \
+ file://CVE-2022-26280.patch \
+ file://CVE-2022-36227.patch \
"
SRC_URI[md5sum] = "d953ed6b47694dadf0e6042f8f9ff451"
SRC_URI[sha256sum] = "b60d58d12632ecf1e8fad7316dc82c6b9738a35625746b47ecdcaf4aed176176"
+# upstream-wontfix: upstream has documented that reported function is not thread-safe
+CVE_CHECK_WHITELIST += "CVE-2023-30571"
+
inherit autotools update-alternatives pkgconfig
CPPFLAGS += "-I${WORKDIR}/extra-includes"
diff --git a/meta/recipes-extended/libnss-nis/libnss-nis.bb b/meta/recipes-extended/libnss-nis/libnss-nis.bb
index 984cc98fc2..0ec64544be 100644
--- a/meta/recipes-extended/libnss-nis/libnss-nis.bb
+++ b/meta/recipes-extended/libnss-nis/libnss-nis.bb
@@ -13,9 +13,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
SECTION = "libs"
DEPENDS += "libtirpc libnsl2"
-PV = "3.1+git${SRCPV}"
+PV = "3.2"
-SRCREV = "062f31999b35393abf7595cb89dfc9590d5a42ad"
+SRCREV = "cd0d391af9535b56e612ed227c1b89be269f3d59"
SRC_URI = "git://github.com/thkukuk/libnss_nis;branch=master;protocol=https \
"
diff --git a/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch b/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch
new file mode 100644
index 0000000000..c78e7ef4d5
--- /dev/null
+++ b/meta/recipes-extended/libtirpc/libtirpc/CVE-2021-46828.patch
@@ -0,0 +1,155 @@
+From 48309e7cb230fc539c3edab0b3363f8ce973194f Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 28 Jul 2022 09:11:04 +0530
+Subject: [PATCH] CVE-2021-46828
+
+Upstream-Status: Backport [http://git.linux-nfs.org/?p=steved/libtirpc.git;a=commit;h=86529758570cef4c73fb9b9c4104fdc510f701ed]
+CVE: CVE-2021-46828
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ src/svc.c | 17 +++++++++++++-
+ src/svc_vc.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 77 insertions(+), 2 deletions(-)
+
+diff --git a/src/svc.c b/src/svc.c
+index 6db164b..3a8709f 100644
+--- a/src/svc.c
++++ b/src/svc.c
+@@ -57,7 +57,7 @@
+
+ #define max(a, b) (a > b ? a : b)
+
+-static SVCXPRT **__svc_xports;
++SVCXPRT **__svc_xports;
+ int __svc_maxrec;
+
+ /*
+@@ -194,6 +194,21 @@ __xprt_do_unregister (xprt, dolock)
+ rwlock_unlock (&svc_fd_lock);
+ }
+
++int
++svc_open_fds()
++{
++ int ix;
++ int nfds = 0;
++
++ rwlock_rdlock (&svc_fd_lock);
++ for (ix = 0; ix < svc_max_pollfd; ++ix) {
++ if (svc_pollfd[ix].fd != -1)
++ nfds++;
++ }
++ rwlock_unlock (&svc_fd_lock);
++ return (nfds);
++}
++
+ /*
+ * Add a service program to the callout list.
+ * The dispatch routine will be called when a rpc request for this
+diff --git a/src/svc_vc.c b/src/svc_vc.c
+index c23cd36..1729963 100644
+--- a/src/svc_vc.c
++++ b/src/svc_vc.c
+@@ -64,6 +64,8 @@
+
+
+ extern rwlock_t svc_fd_lock;
++extern SVCXPRT **__svc_xports;
++extern int svc_open_fds();
+
+ static SVCXPRT *makefd_xprt(int, u_int, u_int);
+ static bool_t rendezvous_request(SVCXPRT *, struct rpc_msg *);
+@@ -82,6 +84,7 @@ static void svc_vc_ops(SVCXPRT *);
+ static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
+ static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
+ void *in);
++static int __svc_destroy_idle(int timeout);
+
+ struct cf_rendezvous { /* kept in xprt->xp_p1 for rendezvouser */
+ u_int sendsize;
+@@ -312,13 +315,14 @@ done:
+ return (xprt);
+ }
+
++
+ /*ARGSUSED*/
+ static bool_t
+ rendezvous_request(xprt, msg)
+ SVCXPRT *xprt;
+ struct rpc_msg *msg;
+ {
+- int sock, flags;
++ int sock, flags, nfds, cnt;
+ struct cf_rendezvous *r;
+ struct cf_conn *cd;
+ struct sockaddr_storage addr;
+@@ -378,6 +382,16 @@ again:
+
+ gettimeofday(&cd->last_recv_time, NULL);
+
++ nfds = svc_open_fds();
++ if (nfds >= (_rpc_dtablesize() / 5) * 4) {
++ /* destroy idle connections */
++ cnt = __svc_destroy_idle(15);
++ if (cnt == 0) {
++ /* destroy least active */
++ __svc_destroy_idle(0);
++ }
++ }
++
+ return (FALSE); /* there is never an rpc msg to be processed */
+ }
+
+@@ -819,3 +833,49 @@ __svc_clean_idle(fd_set *fds, int timeout, bool_t cleanblock)
+ {
+ return FALSE;
+ }
++
++static int
++__svc_destroy_idle(int timeout)
++{
++ int i, ncleaned = 0;
++ SVCXPRT *xprt, *least_active;
++ struct timeval tv, tdiff, tmax;
++ struct cf_conn *cd;
++
++ gettimeofday(&tv, NULL);
++ tmax.tv_sec = tmax.tv_usec = 0;
++ least_active = NULL;
++ rwlock_wrlock(&svc_fd_lock);
++
++ for (i = 0; i <= svc_max_pollfd; i++) {
++ if (svc_pollfd[i].fd == -1)
++ continue;
++ xprt = __svc_xports[i];
++ if (xprt == NULL || xprt->xp_ops == NULL ||
++ xprt->xp_ops->xp_recv != svc_vc_recv)
++ continue;
++ cd = (struct cf_conn *)xprt->xp_p1;
++ if (!cd->nonblock)
++ continue;
++ if (timeout == 0) {
++ timersub(&tv, &cd->last_recv_time, &tdiff);
++ if (timercmp(&tdiff, &tmax, >)) {
++ tmax = tdiff;
++ least_active = xprt;
++ }
++ continue;
++ }
++ if (tv.tv_sec - cd->last_recv_time.tv_sec > timeout) {
++ __xprt_unregister_unlocked(xprt);
++ __svc_vc_dodestroy(xprt);
++ ncleaned++;
++ }
++ }
++ if (timeout == 0 && least_active != NULL) {
++ __xprt_unregister_unlocked(least_active);
++ __svc_vc_dodestroy(least_active);
++ ncleaned++;
++ }
++ rwlock_unlock(&svc_fd_lock);
++ return (ncleaned);
++}
+--
+2.25.1
+
diff --git a/meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb b/meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb
index 10a324c3b6..80151ff83a 100644
--- a/meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb
+++ b/meta/recipes-extended/libtirpc/libtirpc_1.2.6.bb
@@ -9,7 +9,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=f835cce8852481e4b2bbbdd23b5e47f3 \
PROVIDES = "virtual/librpc"
-SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2"
+SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BP}.tar.bz2 \
+ file://CVE-2021-46828.patch \
+ "
UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/libtirpc/files/libtirpc/"
UPSTREAM_CHECK_REGEX = "(?P<pver>\d+(\.\d+)+)/"
SRC_URI[md5sum] = "b25f9cc18bfad50f7c446c77f4ae00bb"
@@ -20,7 +22,7 @@ inherit autotools pkgconfig
EXTRA_OECONF = "--disable-gssapi"
do_install_append() {
- chown root:root ${D}${sysconfdir}/netconfig
+ test -e ${D}${sysconfdir}/netconfig && chown root:root ${D}${sysconfdir}/netconfig
}
BBCLASSEXTEND = "native nativesdk"
diff --git a/meta/recipes-extended/mdadm/files/CVE-2023-28736.patch b/meta/recipes-extended/mdadm/files/CVE-2023-28736.patch
new file mode 100644
index 0000000000..8e0a06cbc7
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/CVE-2023-28736.patch
@@ -0,0 +1,77 @@
+From ced5fa8b170ad448f4076e24a10c731b5cfb36ce Mon Sep 17 00:00:00 2001
+From: Blazej Kucman <blazej.kucman@intel.com>
+Date: Fri, 3 Dec 2021 15:31:15 +0100
+Subject: mdadm: block creation with long names
+
+This fixes buffer overflows in create_mddev(). It prohibits
+creation with unsupported names for DDF and native. For IMSM,
+mdadm will silently cut the name to 16 characters later.
+
+Signed-off-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Signed-off-by: Blazej Kucman <blazej.kucman@intel.com>
+Signed-off-by: Jes Sorensen <jsorensen@fb.com>
+---
+
+Upstream-Status: Backport from [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/patch/?id=ced5fa8b170ad448f4076e24a10c731b5cfb36ce]
+CVE: CVE-2023-28736
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ mdadm.8.in | 5 +++++
+ mdadm.c | 9 ++++++++-
+ mdadm.h | 5 +++++
+ 3 files changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/mdadm.8.in b/mdadm.8.in
+index 28d773c2..68e100cb 100644
+--- a/mdadm.8.in
++++ b/mdadm.8.in
+@@ -2186,6 +2186,11 @@ is run, but will be created by
+ .I udev
+ once the array becomes active.
+
++The max length md-device name is limited to 32 characters.
++Different metadata types have more strict limitation
++(like IMSM where only 16 characters are allowed).
++For that reason, long name could be truncated or rejected, it depends on metadata policy.
++
+ As devices are added, they are checked to see if they contain RAID
+ superblocks or filesystems. They are also checked to see if the variance in
+ device size exceeds 1%.
+diff --git a/mdadm.c b/mdadm.c
+index 91e67467..26299b2e 100644
+--- a/mdadm.c
++++ b/mdadm.c
+@@ -1359,9 +1359,16 @@ int main(int argc, char *argv[])
+ mdfd = open_mddev(devlist->devname, 1);
+ if (mdfd < 0)
+ exit(1);
+- } else
++ } else {
++ char *bname = basename(devlist->devname);
++
++ if (strlen(bname) > MD_NAME_MAX) {
++ pr_err("Name %s is too long.\n", devlist->devname);
++ exit(1);
++ }
+ /* non-existent device is OK */
+ mdfd = open_mddev(devlist->devname, 0);
++ }
+ if (mdfd == -2) {
+ pr_err("device %s exists but is not an md array.\n", devlist->devname);
+ exit(1);
+diff --git a/mdadm.h b/mdadm.h
+index 54567396..c7268a71 100644
+--- a/mdadm.h
++++ b/mdadm.h
+@@ -1880,3 +1880,8 @@ enum r0layout {
+ #define INVALID_SECTORS 1
+ /* And another special number needed for --data_offset=variable */
+ #define VARIABLE_OFFSET 3
++
++/**
++ * This is true for native and DDF, IMSM allows 16.
++ */
++#define MD_NAME_MAX 32
+--
+cgit
+
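
For context, a minimal standalone sketch of the length check this backport introduces: rejecting names longer than MD_NAME_MAX before they are copied into a fixed-size buffer. The buffer size, device name and messages below are illustrative assumptions, not mdadm's real data layout.

    /* Sketch: refuse over-long md device names before copying them. */
    #include <libgen.h>
    #include <stdio.h>
    #include <string.h>

    #define MD_NAME_MAX 32

    static int check_md_name(char *devname)
    {
        char name[MD_NAME_MAX + 1];
        char *bname = basename(devname);

        if (strlen(bname) > MD_NAME_MAX) {
            fprintf(stderr, "Name %s is too long.\n", devname);
            return -1;                 /* reject instead of overflowing name[] */
        }
        strcpy(name, bname);           /* guaranteed to fit after the check */
        printf("accepted md name: %s\n", name);
        return 0;
    }

    int main(void)
    {
        char dev[] = "/dev/md/this_name_is_well_over_the_thirty_two_character_limit";
        return check_md_name(dev) == 0 ? 0 : 1;
    }
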
diff --git a/meta/recipes-extended/mdadm/files/CVE-2023-28938.patch b/meta/recipes-extended/mdadm/files/CVE-2023-28938.patch
new file mode 100644
index 0000000000..1e2990d79a
--- /dev/null
+++ b/meta/recipes-extended/mdadm/files/CVE-2023-28938.patch
@@ -0,0 +1,80 @@
+From 7d374a1869d3a84971d027a7f4233878c8f25a62 Mon Sep 17 00:00:00 2001
+From: Mateusz Grzonka <mateusz.grzonka@intel.com>
+Date: Tue, 27 Jul 2021 10:25:18 +0200
+Subject: Fix memory leak after "mdadm --detail"
+
+Signed-off-by: Mateusz Grzonka <mateusz.grzonka@intel.com>
+Signed-off-by: Jes Sorensen <jsorensen@fb.com>
+---
+Upstream-Status: Backport from [https://git.kernel.org/pub/scm/utils/mdadm/mdadm.git/patch/?id=7d374a1869d3a84971d027a7f4233878c8f25a62]
+CVE: CVE-2023-28938
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ Detail.c | 20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+diff --git a/Detail.c b/Detail.c
+index ad56344f..d3af0ab5 100644
+--- a/Detail.c
++++ b/Detail.c
+@@ -66,11 +66,11 @@ int Detail(char *dev, struct context *c)
+ int spares = 0;
+ struct stat stb;
+ int failed = 0;
+- struct supertype *st;
++ struct supertype *st = NULL;
+ char *subarray = NULL;
+ int max_disks = MD_SB_DISKS; /* just a default */
+ struct mdinfo *info = NULL;
+- struct mdinfo *sra;
++ struct mdinfo *sra = NULL;
+ struct mdinfo *subdev;
+ char *member = NULL;
+ char *container = NULL;
+@@ -93,8 +93,7 @@ int Detail(char *dev, struct context *c)
+ if (!sra) {
+ if (md_get_array_info(fd, &array)) {
+ pr_err("%s does not appear to be an md device\n", dev);
+- close(fd);
+- return rv;
++ goto out;
+ }
+ }
+ external = (sra != NULL && sra->array.major_version == -1 &&
+@@ -108,16 +107,13 @@ int Detail(char *dev, struct context *c)
+ sra->devs == NULL) {
+ pr_err("Array associated with md device %s does not exist.\n",
+ dev);
+- close(fd);
+- sysfs_free(sra);
+- return rv;
++ goto out;
+ }
+ array = sra->array;
+ } else {
+ pr_err("cannot get array detail for %s: %s\n",
+ dev, strerror(errno));
+- close(fd);
+- return rv;
++ goto out;
+ }
+ }
+
+@@ -827,10 +823,12 @@ out:
+ close(fd);
+ free(subarray);
+ free(avail);
+- for (d = 0; d < n_devices; d++)
+- free(devices[d]);
++ if (devices)
++ for (d = 0; d < n_devices; d++)
++ free(devices[d]);
+ free(devices);
+ sysfs_free(sra);
++ free(st);
+ return rv;
+ }
+
+--
+cgit
+
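
The fix above moves Detail() to a single cleanup path; the pattern is easier to see in isolation. A small sketch (names invented, not mdadm's) of initialising pointers to NULL so one out: label can free them unconditionally:

    #include <stdlib.h>

    struct info { int dummy; };

    static int detail_like(int fail_early)
    {
        int rv = 1;
        struct info *sra = NULL;       /* stays NULL unless really allocated */
        char *subarray = NULL;

        if (fail_early)
            goto out;                  /* early error: nothing leaks */

        sra = calloc(1, sizeof(*sra));
        subarray = malloc(16);
        if (sra == NULL || subarray == NULL)
            goto out;

        rv = 0;                        /* success path */
    out:
        free(subarray);                /* free(NULL) is a defined no-op */
        free(sra);
        return rv;
    }

    int main(void)
    {
        detail_like(1);                /* early-exit path */
        return detail_like(0);         /* normal path */
    }
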
diff --git a/meta/recipes-extended/mdadm/mdadm_4.1.bb b/meta/recipes-extended/mdadm/mdadm_4.1.bb
index bb77759cf9..ca326fd1cb 100644
--- a/meta/recipes-extended/mdadm/mdadm_4.1.bb
+++ b/meta/recipes-extended/mdadm/mdadm_4.1.bb
@@ -24,6 +24,8 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/utils/raid/mdadm/${BPN}-${PV}.tar.xz \
file://0001-mdadm-add-option-y-for-use-syslog-to-recive-event-re.patch \
file://include_sysmacros.patch \
file://0001-mdadm-skip-test-11spare-migration.patch \
+ file://CVE-2023-28736.patch \
+ file://CVE-2023-28938.patch \
"
SRC_URI[md5sum] = "51bf3651bd73a06c413a2f964f299598"
diff --git a/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch b/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch
new file mode 100644
index 0000000000..33ac37b7f0
--- /dev/null
+++ b/meta/recipes-extended/pam/libpam/CVE-2024-22365.patch
@@ -0,0 +1,59 @@
+From 031bb5a5d0d950253b68138b498dc93be69a64cb Mon Sep 17 00:00:00 2001
+From: Matthias Gerstner <matthias.gerstner@suse.de>
+Date: Wed, 27 Dec 2023 14:01:59 +0100
+Subject: [PATCH] pam_namespace: protect_dir(): use O_DIRECTORY to prevent
+ local DoS situations
+
+Without O_DIRECTORY the path crawling logic is subject to e.g. FIFOs
+being placed in user controlled directories, causing the PAM module to
+block indefinitely during `openat()`.
+
+Pass O_DIRECTORY to cause the `openat()` to fail if the path does not
+refer to a directory.
+
+With this the check whether the final path element is a directory
+becomes unnecessary, drop it.
+
+Upstream-Status: Backport [https://github.com/linux-pam/linux-pam/commit/031bb5a5d0d950253b68138b498dc93be69a64cb]
+CVE: CVE-2024-22365
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ modules/pam_namespace/pam_namespace.c | 18 +-----------------
+ 1 file changed, 1 insertion(+), 17 deletions(-)
+
+diff --git a/modules/pam_namespace/pam_namespace.c b/modules/pam_namespace/pam_namespace.c
+index 2528cff86..f72d67189 100644
+--- a/modules/pam_namespace/pam_namespace.c
++++ b/modules/pam_namespace/pam_namespace.c
+@@ -1201,7 +1201,7 @@ static int protect_dir(const char *path, mode_t mode, int do_mkdir,
+ int dfd = AT_FDCWD;
+ int dfd_next;
+ int save_errno;
+- int flags = O_RDONLY;
++ int flags = O_RDONLY | O_DIRECTORY;
+ int rv = -1;
+ struct stat st;
+
+@@ -1255,22 +1255,6 @@ static int protect_dir(const char *path, mode_t mode, int do_mkdir,
+ rv = openat(dfd, dir, flags);
+ }
+
+- if (rv != -1) {
+- if (fstat(rv, &st) != 0) {
+- save_errno = errno;
+- close(rv);
+- rv = -1;
+- errno = save_errno;
+- goto error;
+- }
+- if (!S_ISDIR(st.st_mode)) {
+- close(rv);
+- errno = ENOTDIR;
+- rv = -1;
+- goto error;
+- }
+- }
+-
+ if (flags & O_NOFOLLOW) {
+ /* we are inside user-owned dir - protect */
+ if (protect_mount(rv, p, idata) == -1) {
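
A self-contained illustration of why O_DIRECTORY closes the DoS: opening a FIFO with plain O_RDONLY blocks until a writer appears, while O_RDONLY | O_DIRECTORY fails immediately with ENOTDIR. The path below is an assumption for the demo, not anything pam_namespace actually opens.

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* If /tmp/maybe-a-fifo is a FIFO, O_DIRECTORY turns a potential
         * indefinite block into an immediate ENOTDIR failure. */
        int fd = open("/tmp/maybe-a-fifo", O_RDONLY | O_DIRECTORY);
        if (fd == -1) {
            perror("open");
            return 1;
        }
        close(fd);
        return 0;
    }
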
diff --git a/meta/recipes-extended/pam/libpam_1.3.1.bb b/meta/recipes-extended/pam/libpam_1.3.1.bb
index bc72afe6ad..527a368e2d 100644
--- a/meta/recipes-extended/pam/libpam_1.3.1.bb
+++ b/meta/recipes-extended/pam/libpam_1.3.1.bb
@@ -24,6 +24,7 @@ SRC_URI = "https://github.com/linux-pam/linux-pam/releases/download/v${PV}/Linux
file://pam-security-abstract-securetty-handling.patch \
file://pam-unix-nullok-secure.patch \
file://crypt_configure.patch \
+ file://CVE-2024-22365.patch \
"
SRC_URI[md5sum] = "558ff53b0fc0563ca97f79e911822165"
diff --git a/meta/recipes-extended/procps/procps/CVE-2023-4016.patch b/meta/recipes-extended/procps/procps/CVE-2023-4016.patch
new file mode 100644
index 0000000000..50582a8649
--- /dev/null
+++ b/meta/recipes-extended/procps/procps/CVE-2023-4016.patch
@@ -0,0 +1,85 @@
+From 2c933ecba3bb1d3041a5a7a53a7b4078a6003413 Mon Sep 17 00:00:00 2001
+From: Craig Small <csmall@dropbear.xyz>
+Date: Thu, 10 Aug 2023 21:18:38 +1000
+Subject: [PATCH] ps: Fix possible buffer overflow in -C option
+
+ps allocates memory using malloc(length of arg * len of struct).
+In certain strange circumstances, the arg length could be very large
+and the multiplication will overflow, allocating a small amount of
+memory.
+
+Subsequent strncpy() will then write into unallocated memory.
+The fix is to use calloc. It's slower but this is a one-time
+allocation. Other malloc(x * y) calls have also been replaced
+by calloc(x, y)
+
+References:
+ https://www.freelists.org/post/procps/ps-buffer-overflow-CVE-20234016
+ https://nvd.nist.gov/vuln/detail/CVE-2023-4016
+ https://gitlab.com/procps-ng/procps/-/issues/297
+ https://bugs.debian.org/1042887
+
+Signed-off-by: Craig Small <csmall@dropbear.xyz>
+
+CVE: CVE-2023-4016
+Upstream-Status: Backport [https://gitlab.com/procps-ng/procps/-/commit/2c933ecba3bb1d3041a5a7a53a7b4078a6003413]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+
+---
+ NEWS | 1 +
+ ps/parser.c | 8 ++++----
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/NEWS b/NEWS
+index b9509734..64fa3da8 100644
+--- a/NEWS
++++ b/NEWS
+@@ -1,3 +1,5 @@
++ * ps: Fix buffer overflow in -C option CVE-2023-4016 Debian #1042887, issue #297
++
+ procps-ng-3.3.16
+ ----------------
+ * library: Increment to 8:2:0
+diff --git a/ps/parser.c b/ps/parser.c
+index 248aa741..15873dfa 100644
+--- a/ps/parser.c
++++ b/ps/parser.c
+@@ -184,7 +184,6 @@ static const char *parse_list(const char *arg, const char *(*parse_fn)(char *, s
+ const char *err; /* error code that could or did happen */
+ /*** prepare to operate ***/
+ node = malloc(sizeof(selection_node));
+- node->u = malloc(strlen(arg)*sizeof(sel_union)); /* waste is insignificant */
+ node->n = 0;
+ buf = strdup(arg);
+ /*** sanity check and count items ***/
+@@ -205,6 +204,7 @@ static const char *parse_list(const char *arg, const char *(*parse_fn)(char *, s
+ } while (*++walk);
+ if(need_item) goto parse_error;
+ node->n = items;
++ node->u = calloc(items, sizeof(sel_union));
+ /*** actually parse the list ***/
+ walk = buf;
+ while(items--){
+@@ -1031,15 +1031,15 @@ static const char *parse_trailing_pids(void){
+ thisarg = ps_argc - 1; /* we must be at the end now */
+
+ pidnode = malloc(sizeof(selection_node));
+- pidnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ pidnode->u = calloc(i, sizeof(sel_union)); /* waste is insignificant */
+ pidnode->n = 0;
+
+ grpnode = malloc(sizeof(selection_node));
+- grpnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ grpnode->u = calloc(i,sizeof(sel_union)); /* waste is insignificant */
+ grpnode->n = 0;
+
+ sidnode = malloc(sizeof(selection_node));
+- sidnode->u = malloc(i*sizeof(sel_union)); /* waste is insignificant */
++ sidnode->u = calloc(i, sizeof(sel_union)); /* waste is insignificant */
+ sidnode->n = 0;
+
+ while(i--){
+--
+GitLab
+
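
A standalone sketch of the reasoning behind the calloc() change: malloc(n * size) silently wraps when the product overflows, while calloc(n, size) performs the same multiplication with an overflow check and returns NULL. The count below is contrived purely to force the wrap.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t n = SIZE_MAX / 2 + 2;   /* attacker-influenced element count */
        size_t sz = 4;                 /* element size */

        void *a = malloc(n * sz);      /* n * sz wraps to a tiny value */
        void *b = calloc(n, sz);       /* overflow detected, returns NULL */

        printf("malloc: %p (possibly a small, undersized block)\n", a);
        printf("calloc: %p (NULL: overflow rejected)\n", b);
        free(a);
        free(b);
        return 0;
    }
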
diff --git a/meta/recipes-extended/procps/procps_3.3.16.bb b/meta/recipes-extended/procps/procps_3.3.16.bb
index 3a8289b359..ac27734a6f 100644
--- a/meta/recipes-extended/procps/procps_3.3.16.bb
+++ b/meta/recipes-extended/procps/procps_3.3.16.bb
@@ -14,6 +14,7 @@ inherit autotools gettext pkgconfig update-alternatives
SRC_URI = "git://gitlab.com/procps-ng/procps.git;protocol=https;branch=master \
file://sysctl.conf \
+ file://CVE-2023-4016.patch \
"
SRCREV = "59c88e18f29000ceaf7e5f98181b07be443cf12f"
diff --git a/meta/recipes-extended/screen/screen/CVE-2023-24626.patch b/meta/recipes-extended/screen/screen/CVE-2023-24626.patch
new file mode 100644
index 0000000000..73caf9d81b
--- /dev/null
+++ b/meta/recipes-extended/screen/screen/CVE-2023-24626.patch
@@ -0,0 +1,40 @@
+From e9ad41bfedb4537a6f0de20f00b27c7739f168f7 Mon Sep 17 00:00:00 2001
+From: Alexander Naumov <alexander_naumov@opensuse.org>
+Date: Mon, 30 Jan 2023 17:22:25 +0200
+Subject: fix: missing signal sending permission check on failed query messages
+
+Signed-off-by: Alexander Naumov <alexander_naumov@opensuse.org>
+
+CVE: CVE-2023-24626
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/screen.git/commit/?id=e9ad41bfedb4537a6f0de20f00b27c7739f168f7]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ socket.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/socket.c b/socket.c
+index bb68b35..9d87445 100644
+--- a/socket.c
++++ b/socket.c
+@@ -1285,11 +1285,16 @@ ReceiveMsg()
+ else
+ queryflag = -1;
+
+- Kill(m.m.command.apid,
++ if (CheckPid(m.m.command.apid)) {
++ Msg(0, "Query attempt with bad pid(%d)!", m.m.command.apid);
++ }
++ else {
++ Kill(m.m.command.apid,
+ (queryflag >= 0)
+ ? SIGCONT
+ : SIG_BYE); /* Send SIG_BYE if an error happened */
+- queryflag = -1;
++ queryflag = -1;
++ }
+ }
+ break;
+ case MSG_COMMAND:
+--
+2.25.1
+
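
One common way to implement such a permission pre-check, shown only to illustrate the idea (this is not screen's actual CheckPid()): probing with signal 0 verifies that the caller may signal the pid before any real signal is sent.

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static int can_signal(pid_t pid)
    {
        /* Signal 0 performs only the existence/permission check. */
        return kill(pid, 0) == 0;
    }

    int main(void)
    {
        pid_t target = getppid();      /* stand-in for the pid from the query message */
        if (!can_signal(target)) {
            fprintf(stderr, "Query attempt with bad pid (%d)!\n", (int)target);
            return 1;
        }
        printf("pid %d may be signalled\n", (int)target);
        return 0;
    }
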
diff --git a/meta/recipes-extended/screen/screen_4.8.0.bb b/meta/recipes-extended/screen/screen_4.8.0.bb
index fe640c262b..c4faa27023 100644
--- a/meta/recipes-extended/screen/screen_4.8.0.bb
+++ b/meta/recipes-extended/screen/screen_4.8.0.bb
@@ -22,6 +22,7 @@ SRC_URI = "${GNU_MIRROR}/screen/screen-${PV}.tar.gz \
file://0001-fix-for-multijob-build.patch \
file://0001-Remove-more-compatibility-stuff.patch \
file://CVE-2021-26937.patch \
+ file://CVE-2023-24626.patch \
"
SRC_URI[md5sum] = "d276213d3acd10339cd37848b8c4ab1e"
diff --git a/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch b/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch
new file mode 100644
index 0000000000..aea07ff361
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/0001-Overhaul-valid_field.patch
@@ -0,0 +1,66 @@
+From 2eaea70111f65b16d55998386e4ceb4273c19eb4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20G=C3=B6ttsche?= <cgzones@googlemail.com>
+Date: Fri, 31 Mar 2023 14:46:50 +0200
+Subject: [PATCH] Overhaul valid_field()
+
+e5905c4b ("Added control character check") introduced checking for
+control characters but had the logic inverted, so it rejects all
+characters that are not control ones.
+
+Cast the character to `unsigned char` before passing to the character
+checking functions to avoid UB.
+
+Use strpbrk(3) for the illegal character test and return early.
+
+Upstream-Status: Backport [https://github.com/shadow-maint/shadow/commit/2eaea70111f65b16d55998386e4ceb4273c19eb4]
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/fields.c | 24 ++++++++++--------------
+ 1 file changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/lib/fields.c b/lib/fields.c
+index fb51b582..53929248 100644
+--- a/lib/fields.c
++++ b/lib/fields.c
+@@ -37,26 +37,22 @@ int valid_field (const char *field, const char *illegal)
+
+ /* For each character of field, search if it appears in the list
+ * of illegal characters. */
++ if (illegal && NULL != strpbrk (field, illegal)) {
++ return -1;
++ }
++
++ /* Search if there are non-printable or control characters */
+ for (cp = field; '\0' != *cp; cp++) {
+- if (strchr (illegal, *cp) != NULL) {
++ unsigned char c = *cp;
++ if (!isprint (c)) {
++ err = 1;
++ }
++ if (iscntrl (c)) {
+ err = -1;
+ break;
+ }
+ }
+
+- if (0 == err) {
+- /* Search if there are non-printable or control characters */
+- for (cp = field; '\0' != *cp; cp++) {
+- if (!isprint (*cp)) {
+- err = 1;
+- }
+- if (!iscntrl (*cp)) {
+- err = -1;
+- break;
+- }
+- }
+- }
+-
+ return err;
+ }
+
+--
+2.34.1
+
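
Restated outside shadow's sources, the corrected logic looks roughly like this (a sketch, not the project's code): reject illegal and control characters, flag merely non-printable ones, and cast to unsigned char before the <ctype.h> calls to avoid undefined behaviour.

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static int valid_field_sketch(const char *field, const char *illegal)
    {
        int err = 0;
        const char *cp;

        if (illegal != NULL && strpbrk(field, illegal) != NULL)
            return -1;                 /* illegal character present */

        for (cp = field; *cp != '\0'; cp++) {
            unsigned char c = *cp;
            if (!isprint(c))
                err = 1;               /* non-printable, but not fatal */
            if (iscntrl(c))
                return -1;             /* control character: reject */
        }
        return err;
    }

    int main(void)
    {
        printf("%d\n", valid_field_sketch("plain", ":,="));     /* 0  */
        printf("%d\n", valid_field_sketch("bad:name", ":,="));  /* -1 */
        return 0;
    }
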
diff --git a/meta/recipes-extended/shadow/files/CVE-2023-29383.patch b/meta/recipes-extended/shadow/files/CVE-2023-29383.patch
new file mode 100644
index 0000000000..dbf4a508e9
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/CVE-2023-29383.patch
@@ -0,0 +1,54 @@
+From e5905c4b84d4fb90aefcd96ee618411ebfac663d Mon Sep 17 00:00:00 2001
+From: tomspiderlabs <128755403+tomspiderlabs@users.noreply.github.com>
+Date: Thu, 23 Mar 2023 23:39:38 +0000
+Subject: [PATCH] Added control character check
+
+Added control character check, returning -1 (to "err") if control characters are present.
+
+CVE: CVE-2023-29383
+Upstream-Status: Backport
+
+Reference to upstream:
+https://github.com/shadow-maint/shadow/commit/e5905c4b84d4fb90aefcd96ee618411ebfac663d
+
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/fields.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/lib/fields.c b/lib/fields.c
+index 640be931..fb51b582 100644
+--- a/lib/fields.c
++++ b/lib/fields.c
+@@ -21,9 +21,9 @@
+ *
+ * The supplied field is scanned for non-printable and other illegal
+ * characters.
+- * + -1 is returned if an illegal character is present.
+- * + 1 is returned if no illegal characters are present, but the field
+- * contains a non-printable character.
++ * + -1 is returned if an illegal or control character is present.
++ * + 1 is returned if no illegal or control characters are present,
++ * but the field contains a non-printable character.
+ * + 0 is returned otherwise.
+ */
+ int valid_field (const char *field, const char *illegal)
+@@ -45,10 +45,13 @@ int valid_field (const char *field, const char *illegal)
+ }
+
+ if (0 == err) {
+- /* Search if there are some non-printable characters */
++ /* Search if there are non-printable or control characters */
+ for (cp = field; '\0' != *cp; cp++) {
+ if (!isprint (*cp)) {
+ err = 1;
++ }
++ if (!iscntrl (*cp)) {
++ err = -1;
+ break;
+ }
+ }
+--
+2.34.1
+
diff --git a/meta/recipes-extended/shadow/files/CVE-2023-4641.patch b/meta/recipes-extended/shadow/files/CVE-2023-4641.patch
new file mode 100644
index 0000000000..75dbbad299
--- /dev/null
+++ b/meta/recipes-extended/shadow/files/CVE-2023-4641.patch
@@ -0,0 +1,146 @@
+From 51731b01fd9a608397da22b7b9164e4996f3d4c6 Mon Sep 17 00:00:00 2001
+From: Alejandro Colomar <alx@kernel.org>
+Date: Sat, 10 Jun 2023 16:20:05 +0200
+Subject: [PATCH] gpasswd(1): Fix password leak
+
+CVE: CVE-2023-4641
+Upstream-Status: Backport [https://github.com/shadow-maint/shadow/commit/65c88a43a23c2391dcc90c0abda3e839e9c57904]
+
+How to trigger this password leak?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When gpasswd(1) asks for the new password, it asks twice (as is usual
+for confirming the new password). Each of those 2 password prompts
+uses agetpass() to get the password. If the second agetpass() fails,
+the first password, which has been copied into the 'static' buffer
+'pass' via STRFCPY(), wasn't being zeroed.
+
+agetpass() is defined in <./libmisc/agetpass.c> (around line 91), and
+can fail for any of the following reasons:
+
+- malloc(3) or readpassphrase(3) failure.
+
+ These are going to be difficult to trigger. Maybe getting the system
+ to the limits of memory utilization at that exact point, so that the
+ next malloc(3) gets ENOMEM, and possibly even the OOM is triggered.
+ About readpassphrase(3), ENFILE and EINTR seem the only plausible
+ ones, and EINTR probably requires privilege or being the same user;
+ but I wouldn't discard ENFILE so easily, if a process starts opening
+ files.
+
+- The password is longer than PASS_MAX.
+
+ This is plausible with physical access. However, at that point, a
+ keylogger will be a much simpler attack.
+
+And, the attacker must be able to know when the second password is being
+introduced, which is not going to be easy.
+
+How to read the password after the leak?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Provoking the leak yourself at the right point by entering a very long
+password is easy, and inspecting the process stack at that point should
+be doable. Try to find some consistent patterns.
+
+Then, search for those patterns in free memory, right after the victim
+leaks their password.
+
+Once you get the leak, a program should read all the free memory
+searching for patterns that gpasswd(1) leaves nearby the leaked
+password.
+
+On 6/10/23 03:14, Seth Arnold wrote:
+> An attacker process wouldn't be able to use malloc(3) for this task.
+> There's a handful of tools available for userspace to allocate memory:
+>
+> - brk / sbrk
+> - mmap MAP_ANONYMOUS
+> - mmap /dev/zero
+> - mmap some other file
+> - shm_open
+> - shmget
+>
+> Most of these return only pages of zeros to a process. Using mmap of an
+> existing file, you can get some of the contents of the file demand-loaded
+> into the memory space on the first use.
+>
+> The MAP_UNINITIALIZED flag only works if the kernel was compiled with
+> CONFIG_MMAP_ALLOW_UNINITIALIZED. This is rare.
+>
+> malloc(3) doesn't zero memory, to our collective frustration, but all the
+> garbage in the allocations is from previous allocations in the current
+> process. It isn't leftover from other processes.
+>
+> The avenues available for reading the memory:
+> - /dev/mem and /dev/kmem (requires root, not available with Secure Boot)
+> - /proc/pid/mem (requires ptrace privileges, mediated by YAMA)
+> - ptrace (requires ptrace privileges, mediated by YAMA)
+> - causing memory to be swapped to disk, and then inspecting the swap
+>
+> These all require a certain amount of privileges.
+
+How to fix it?
+~~~~~~~~~~~~~~
+
+memzero(), which internally calls explicit_bzero(3), or whatever
+alternative the system provides with a slightly different name, will
+make sure that the buffer is zeroed in memory, and optimizations are not
+allowed to impede this zeroing.
+
+This is not really 100% effective, since compilers may place copies of
+the string somewhere hidden in the stack. Those copies won't get zeroed
+by explicit_bzero(3). However, that's arguably a compiler bug, since
+compilers should make everything possible to avoid optimizing strings
+that are later passed to explicit_bzero(3). But we all know that
+sometimes it's impossible to have perfect knowledge in the compiler, so
+this is plausible. Nevertheless, there's nothing we can do against such
+issues, except minimizing the time such passwords are stored in plain
+text.
+
+Security concerns
+~~~~~~~~~~~~~~~~~
+
+We believe this isn't easy to exploit. Nevertheless, and since the fix
+is trivial, this fix should probably be applied soon, and backported to
+all supported distributions, to prevent someone else having more
+imagination than us to find a way.
+
+Affected versions
+~~~~~~~~~~~~~~~~~
+
+All. Bug introduced in shadow 19990709. That's the second commit in
+the git history.
+
+Fixes: 45c6603cc86c ("[svn-upgrade] Integrating new upstream version, shadow (19990709)")
+Reported-by: Alejandro Colomar <alx@kernel.org>
+Cc: Serge Hallyn <serge@hallyn.com>
+Cc: Iker Pedrosa <ipedrosa@redhat.com>
+Cc: Seth Arnold <seth.arnold@canonical.com>
+Cc: Christian Brauner <christian@brauner.io>
+Cc: Balint Reczey <rbalint@debian.org>
+Cc: Sam James <sam@gentoo.org>
+Cc: David Runge <dvzrv@archlinux.org>
+Cc: Andreas Jaeger <aj@suse.de>
+Cc: <~hallyn/shadow@lists.sr.ht>
+Signed-off-by: Alejandro Colomar <alx@kernel.org>
+Signed-off-by: Hugo SIMELIERE <hsimeliere.opensource@witekio.com>
+---
+ src/gpasswd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/src/gpasswd.c b/src/gpasswd.c
+index 4d75af96..a698b32a 100644
+--- a/src/gpasswd.c
++++ b/src/gpasswd.c
+@@ -918,6 +918,7 @@ static void change_passwd (struct group *gr)
+ strzero (cp);
+ cp = getpass (_("Re-enter new password: "));
+ if (NULL == cp) {
++ memzero (pass, sizeof pass);
+ exit (1);
+ }
+
+--
+2.42.0
+
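
The fix itself is the single memzero() call above; a rough standalone equivalent is sketched below. It assumes glibc's explicit_bzero() and the legacy getpass() are available, which is not exactly what shadow uses (shadow wraps this in its own memzero()).

    #define _GNU_SOURCE
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define PASS_BUF 128               /* illustrative size, not shadow's PASS_MAX */

    int main(void)
    {
        char pass[PASS_BUF];
        char *cp = getpass("New password: ");
        if (cp == NULL)
            return 1;
        strncpy(pass, cp, sizeof(pass) - 1);
        pass[sizeof(pass) - 1] = '\0';

        cp = getpass("Re-enter new password: ");
        if (cp == NULL) {
            explicit_bzero(pass, sizeof(pass));   /* the previously missing scrub */
            return 1;
        }
        /* ... compare the two entries and use the password ... */
        explicit_bzero(pass, sizeof(pass));
        return 0;
    }
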
diff --git a/meta/recipes-extended/shadow/shadow.inc b/meta/recipes-extended/shadow/shadow.inc
index bfe50c18f6..c16292c38a 100644
--- a/meta/recipes-extended/shadow/shadow.inc
+++ b/meta/recipes-extended/shadow/shadow.inc
@@ -14,6 +14,9 @@ SRC_URI = "https://github.com/shadow-maint/shadow/releases/download/${PV}/${BP}.
file://shadow-4.1.3-dots-in-usernames.patch \
${@bb.utils.contains('PACKAGECONFIG', 'pam', '${PAM_SRC_URI}', '', d)} \
file://shadow-relaxed-usernames.patch \
+ file://CVE-2023-29383.patch \
+ file://0001-Overhaul-valid_field.patch \
+ file://CVE-2023-4641.patch \
"
SRC_URI_append_class-target = " \
diff --git a/meta/recipes-extended/shadow/shadow_4.8.1.bb b/meta/recipes-extended/shadow/shadow_4.8.1.bb
index ff4aad926f..9dfcd4bc10 100644
--- a/meta/recipes-extended/shadow/shadow_4.8.1.bb
+++ b/meta/recipes-extended/shadow/shadow_4.8.1.bb
@@ -9,3 +9,7 @@ BBCLASSEXTEND = "native nativesdk"
# Severity is low and marked as closed and won't fix.
# https://bugzilla.redhat.com/show_bug.cgi?id=884658
CVE_CHECK_WHITELIST += "CVE-2013-4235"
+
+# This is an issue for a different shadow
+CVE_CHECK_WHITELIST += "CVE-2016-15024"
+
diff --git a/meta/recipes-extended/sudo/files/CVE-2023-22809.patch b/meta/recipes-extended/sudo/files/CVE-2023-22809.patch
new file mode 100644
index 0000000000..6c47eb3e44
--- /dev/null
+++ b/meta/recipes-extended/sudo/files/CVE-2023-22809.patch
@@ -0,0 +1,113 @@
+Backport of:
+
+# HG changeset patch
+# Parent 7275148cad1f8cd3c350026460acc4d6ad349c3a
+sudoedit: do not permit editor arguments to include "--"
+We use "--" to separate the editor and arguments from the files to edit.
+If the editor arguments include "--", sudo can be tricked into allowing
+the user to edit a file not permitted by the security policy.
+Thanks to Matthieu Barjole and Victor Cutillas of Synacktiv
+(https://synacktiv.com) for finding this bug.
+
+CVE: CVE-2023-22809
+Upstream-Status: Backport [http://archive.ubuntu.com/ubuntu/pool/main/s/sudo/sudo_1.8.31-1ubuntu1.4.debian.tar.xz]
+Signed-off-by: Omkar Patil <omkar.patil@kpit.com>
+
+--- a/plugins/sudoers/editor.c
++++ b/plugins/sudoers/editor.c
+@@ -56,7 +56,7 @@ resolve_editor(const char *ed, size_t ed
+ const char *cp, *ep, *tmp;
+ const char *edend = ed + edlen;
+ struct stat user_editor_sb;
+- int nargc;
++ int nargc = 0;
+ debug_decl(resolve_editor, SUDOERS_DEBUG_UTIL)
+
+ /*
+@@ -102,6 +102,21 @@ resolve_editor(const char *ed, size_t ed
+ free(editor_path);
+ while (nargc--)
+ free(nargv[nargc]);
++ free(nargv);
++ debug_return_str(NULL);
++ }
++
++ /*
++ * We use "--" to separate the editor and arguments from the files
++ * to edit. The editor arguments themselves may not contain "--".
++ */
++ if (strcmp(nargv[nargc], "--") == 0) {
++ sudo_warnx(U_("ignoring editor: %.*s"), (int)edlen, ed);
++ sudo_warnx("%s", U_("editor arguments may not contain \"--\""));
++ errno = EINVAL;
++ free(editor_path);
++ while (nargc--)
++ free(nargv[nargc]);
+ free(nargv);
+ debug_return_str(NULL);
+ }
+--- a/plugins/sudoers/sudoers.c
++++ b/plugins/sudoers/sudoers.c
+@@ -616,20 +616,31 @@ sudoers_policy_main(int argc, char * con
+
+ /* Note: must call audit before uid change. */
+ if (ISSET(sudo_mode, MODE_EDIT)) {
++ const char *env_editor = NULL;
+ int edit_argc;
+- const char *env_editor;
+
+ free(safe_cmnd);
+ safe_cmnd = find_editor(NewArgc - 1, NewArgv + 1, &edit_argc,
+ &edit_argv, NULL, &env_editor, false);
+ if (safe_cmnd == NULL) {
+- if (errno != ENOENT)
++ switch (errno) {
++ case ENOENT:
++ audit_failure(NewArgc, NewArgv, N_("%s: command not found"),
++ env_editor ? env_editor : def_editor);
++ sudo_warnx(U_("%s: command not found"),
++ env_editor ? env_editor : def_editor);
++ goto bad;
++ case EINVAL:
++ if (def_env_editor && env_editor != NULL) {
++ /* User tried to do something funny with the editor. */
++ log_warningx(SLOG_NO_STDERR|SLOG_SEND_MAIL,
++ "invalid user-specified editor: %s", env_editor);
++ goto bad;
++ }
++ /* FALLTHROUGH */
++ default:
+ goto done;
+- audit_failure(NewArgc, NewArgv, N_("%s: command not found"),
+- env_editor ? env_editor : def_editor);
+- sudo_warnx(U_("%s: command not found"),
+- env_editor ? env_editor : def_editor);
+- goto bad;
++ }
+ }
+ if (audit_success(edit_argc, edit_argv) != 0 && !def_ignore_audit_errors)
+ goto done;
+--- a/plugins/sudoers/visudo.c
++++ b/plugins/sudoers/visudo.c
+@@ -308,7 +308,7 @@ static char *
+ get_editor(int *editor_argc, char ***editor_argv)
+ {
+ char *editor_path = NULL, **whitelist = NULL;
+- const char *env_editor;
++ const char *env_editor = NULL;
+ static char *files[] = { "+1", "sudoers" };
+ unsigned int whitelist_len = 0;
+ debug_decl(get_editor, SUDOERS_DEBUG_UTIL)
+@@ -342,7 +342,11 @@ get_editor(int *editor_argc, char ***edi
+ if (editor_path == NULL) {
+ if (def_env_editor && env_editor != NULL) {
+ /* We are honoring $EDITOR so this is a fatal error. */
+- sudo_fatalx(U_("specified editor (%s) doesn't exist"), env_editor);
++ if (errno == ENOENT) {
++ sudo_warnx(U_("specified editor (%s) doesn't exist"),
++ env_editor);
++ }
++ exit(EXIT_FAILURE);
+ }
+ sudo_fatalx(U_("no editor found (editor path = %s)"), def_editor);
+ }
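
The core of the sudoedit change is the new "--" test on the parsed editor arguments. A toy version of that check, with argument handling simplified and no relation to sudo's real parser:

    #include <stdio.h>
    #include <string.h>

    static int editor_args_ok(char *argv[], int argc)
    {
        for (int i = 1; i < argc; i++)         /* argv[0] is the editor path */
            if (strcmp(argv[i], "--") == 0)
                return 0;                      /* "--" would smuggle in extra files */
        return 1;
    }

    int main(void)
    {
        char *bad[]  = { "vim", "--", "/etc/passwd" };
        char *good[] = { "vim", "-u", "NONE" };
        printf("bad:  %s\n", editor_args_ok(bad, 3)  ? "accepted" : "rejected");
        printf("good: %s\n", editor_args_ok(good, 3) ? "accepted" : "rejected");
        return 0;
    }
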
diff --git a/meta/recipes-extended/sudo/sudo.inc b/meta/recipes-extended/sudo/sudo.inc
index 153731c807..9c7279d25a 100644
--- a/meta/recipes-extended/sudo/sudo.inc
+++ b/meta/recipes-extended/sudo/sudo.inc
@@ -3,7 +3,7 @@ DESCRIPTION = "Sudo (superuser do) allows a system administrator to give certain
HOMEPAGE = "http://www.sudo.ws"
BUGTRACKER = "http://www.sudo.ws/bugs/"
SECTION = "admin"
-LICENSE = "ISC & BSD & Zlib"
+LICENSE = "ISC & BSD-3-Clause & BSD-2-Clause & Zlib"
LIC_FILES_CHKSUM = "file://doc/LICENSE;md5=07966675feaddba70cc812895b248230 \
file://plugins/sudoers/redblack.c;beginline=1;endline=46;md5=03e35317699ba00b496251e0dfe9f109 \
file://lib/util/reallocarray.c;beginline=3;endline=15;md5=397dd45c7683e90b9f8bf24638cf03bf \
diff --git a/meta/recipes-extended/sudo/sudo/CVE-2022-43995.patch b/meta/recipes-extended/sudo/sudo/CVE-2022-43995.patch
new file mode 100644
index 0000000000..1336c7701d
--- /dev/null
+++ b/meta/recipes-extended/sudo/sudo/CVE-2022-43995.patch
@@ -0,0 +1,59 @@
+From e1554d7996a59bf69544f3d8dd4ae683027948f9 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 15 Nov 2022 09:17:18 +0530
+Subject: [PATCH] CVE-2022-43995
+
+Upstream-Status: Backport [https://github.com/sudo-project/sudo/commit/bd209b9f16fcd1270c13db27ae3329c677d48050]
+CVE: CVE-2022-43995
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+Potential heap overflow for passwords < 8
+characters. Starting with sudo 1.8.0 the plaintext password buffer is
+dynamically sized so it is not safe to assume that it is at least 9 bytes in
+size.
+Found by Hugo Lefeuvre (University of Manchester) with ConfFuzz.
+---
+ plugins/sudoers/auth/passwd.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/plugins/sudoers/auth/passwd.c b/plugins/sudoers/auth/passwd.c
+index 03c7a16..76a7824 100644
+--- a/plugins/sudoers/auth/passwd.c
++++ b/plugins/sudoers/auth/passwd.c
+@@ -63,7 +63,7 @@ sudo_passwd_init(struct passwd *pw, sudo_auth *auth)
+ int
+ sudo_passwd_verify(struct passwd *pw, char *pass, sudo_auth *auth, struct sudo_conv_callback *callback)
+ {
+- char sav, *epass;
++ char des_pass[9], *epass;
+ char *pw_epasswd = auth->data;
+ size_t pw_len;
+ int matched = 0;
+@@ -75,12 +75,12 @@ sudo_passwd_verify(struct passwd *pw, char *pass, sudo_auth *auth, struct sudo_c
+
+ /*
+ * Truncate to 8 chars if standard DES since not all crypt()'s do this.
+- * If this turns out not to be safe we will have to use OS #ifdef's (sigh).
+ */
+- sav = pass[8];
+ pw_len = strlen(pw_epasswd);
+- if (pw_len == DESLEN || HAS_AGEINFO(pw_epasswd, pw_len))
+- pass[8] = '\0';
++ if (pw_len == DESLEN || HAS_AGEINFO(pw_epasswd, pw_len)) {
++ strlcpy(des_pass, pass, sizeof(des_pass));
++ pass = des_pass;
++ }
+
+ /*
+ * Normal UN*X password check.
+@@ -88,7 +88,6 @@ sudo_passwd_verify(struct passwd *pw, char *pass, sudo_auth *auth, struct sudo_c
+ * only compare the first DESLEN characters in that case.
+ */
+ epass = (char *) crypt(pass, pw_epasswd);
+- pass[8] = sav;
+ if (epass != NULL) {
+ if (HAS_AGEINFO(pw_epasswd, pw_len) && strlen(epass) == DESLEN)
+ matched = !strncmp(pw_epasswd, epass, DESLEN);
+--
+2.25.1
+
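
In essence the fix copies at most eight characters into a local buffer instead of writing a NUL into the caller's possibly shorter password buffer. A sketch of the idea, with snprintf() standing in for the strlcpy() the patch uses:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char pass_in[] = "abc";        /* < 8 chars: the old code read/wrote pass[8] past the end */
        char des_pass[9];

        snprintf(des_pass, sizeof(des_pass), "%s", pass_in);  /* safe truncation to 8 chars */
        printf("crypt() would see \"%s\" (len %zu)\n", des_pass, strlen(des_pass));
        return 0;
    }
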
diff --git a/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-1.patch b/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-1.patch
new file mode 100644
index 0000000000..bc6f8c19a6
--- /dev/null
+++ b/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-1.patch
@@ -0,0 +1,646 @@
+Origin: Backport obtained from SUSE. Thanks!
+
+From 334daf92b31b79ce68ed75e2ee14fca265f029ca Mon Sep 17 00:00:00 2001
+From: "Todd C. Miller" <Todd.Miller@sudo.ws>
+Date: Wed, 18 Jan 2023 08:21:34 -0700
+Subject: [PATCH] Escape control characters in log messages and "sudoreplay -l"
+ output. The log message contains user-controlled strings that could include
+ things like terminal control characters. Space characters in the command
+ path are now also escaped.
+
+Command line arguments that contain spaces are surrounded with
+single quotes and any literal single quote or backslash characters
+are escaped with a backslash. This makes it possible to distinguish
+multiple command line arguments from a single argument that contains
+spaces.
+
+Issue found by Matthieu Barjole and Victor Cutillas of Synacktiv
+(https://synacktiv.com).
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/sudo/tree/debian/patches/CVE-2023-2848x-1.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/sudo-project/sudo/commit/334daf92b31b79ce68ed75e2ee14fca265f029ca]
+CVE: CVE-2023-28486 CVE-2023-28487
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ doc/sudoers.man.in | 33 +++++++--
+ doc/sudoers.mdoc.in | 28 ++++++--
+ doc/sudoreplay.man.in | 9 ++
+ doc/sudoreplay.mdoc.in | 10 ++
+ include/sudo_compat.h | 6 +
+ include/sudo_lbuf.h | 7 ++
+ lib/util/lbuf.c | 106 +++++++++++++++++++++++++++++++
+ lib/util/util.exp.in | 1
+ plugins/sudoers/logging.c | 145 +++++++++++--------------------------------
+ plugins/sudoers/sudoreplay.c | 44 +++++++++----
+ 10 files changed, 257 insertions(+), 132 deletions(-)
+
+--- a/doc/sudoers.man.in
++++ b/doc/sudoers.man.in
+@@ -4566,6 +4566,19 @@ can log events using either
+ syslog(3)
+ or a simple log file.
+ The log format is almost identical in both cases.
++Any control characters present in the log data are formatted in octal
++with a leading
++\(oq#\(cq
++character.
++For example, a horizontal tab is stored as
++\(oq#011\(cq
++and an embedded carriage return is stored as
++\(oq#015\(cq.
++In addition, space characters in the command path are stored as
++\(oq#040\(cq.
++Literal single quotes and backslash characters
++(\(oq\e\(cq)
++in command line arguments are escaped with a backslash.
+ .SS "Accepted command log entries"
+ Commands that sudo runs are logged using the following format (split
+ into multiple lines for readability):
+@@ -4646,7 +4659,7 @@ A list of environment variables specifie
+ if specified.
+ .TP 14n
+ command
+-The actual command that was executed.
++The actual command that was executed, including any command line arguments.
+ .PP
+ Messages are logged using the locale specified by
+ \fIsudoers_locale\fR,
+@@ -4882,17 +4895,21 @@ with a few important differences:
+ 1.\&
+ The
+ \fIprogname\fR
+-and
+-\fIhostname\fR
+-fields are not present.
++field is not present.
+ .TP 5n
+ 2.\&
+-If the
+-\fIlog_year\fR
+-option is enabled,
+-the date will also include the year.
++The
++\fIhostname\fR
++is only logged if the
++\fIlog_host\fR
++option is enabled.
+ .TP 5n
+ 3.\&
++The date does not include the year unless the
++\fIlog_year\fR
++option is enabled.
++.TP 5n
++4.\&
+ Lines that are longer than
+ \fIloglinelen\fR
+ characters (80 by default) are word-wrapped and continued on the
+--- a/doc/sudoers.mdoc.in
++++ b/doc/sudoers.mdoc.in
+@@ -4261,6 +4261,19 @@ can log events using either
+ .Xr syslog 3
+ or a simple log file.
+ The log format is almost identical in both cases.
++Any control characters present in the log data are formatted in octal
++with a leading
++.Ql #
++character.
++For example, a horizontal tab is stored as
++.Ql #011
++and an embedded carriage return is stored as
++.Ql #015 .
++In addition, space characters in the command path are stored as
++.Ql #040 .
++Literal single quotes and backslash characters
++.Pq Ql \e
++in command line arguments are escaped with a backslash.
+ .Ss Accepted command log entries
+ Commands that sudo runs are logged using the following format (split
+ into multiple lines for readability):
+@@ -4328,7 +4341,7 @@ option is enabled.
+ A list of environment variables specified on the command line,
+ if specified.
+ .It command
+-The actual command that was executed.
++The actual command that was executed, including any command line arguments.
+ .El
+ .Pp
+ Messages are logged using the locale specified by
+@@ -4550,14 +4563,17 @@ with a few important differences:
+ .It
+ The
+ .Em progname
+-and
++field is not present.
++.It
++The
+ .Em hostname
+-fields are not present.
++is only logged if the
++.Em log_host
++option is enabled.
+ .It
+-If the
++The date does not include the year unless the
+ .Em log_year
+-option is enabled,
+-the date will also include the year.
++option is enabled.
+ .It
+ Lines that are longer than
+ .Em loglinelen
+--- a/doc/sudoreplay.man.in
++++ b/doc/sudoreplay.man.in
+@@ -149,6 +149,15 @@ In this mode,
+ will list available sessions in a format similar to the
+ \fBsudo\fR
+ log file format, sorted by file name (or sequence number).
++Any control characters present in the log data are formated in octal
++with a leading
++\(oq#\(cq
++character.
++For example, a horizontal tab is displayed as
++\(oq#011\(cq
++and an embedded carriage return is displayed as
++\(oq#015\(cq.
++.sp
+ If a
+ \fIsearch expression\fR
+ is specified, it will be used to restrict the IDs that are displayed.
+--- a/doc/sudoreplay.mdoc.in
++++ b/doc/sudoreplay.mdoc.in
+@@ -142,6 +142,16 @@ In this mode,
+ will list available sessions in a format similar to the
+ .Nm sudo
+ log file format, sorted by file name (or sequence number).
++Any control characters present in the log data are formatted in octal
++with a leading
++.Ql #
++character.
++For example, a horizontal tab is displayed as
++.Ql #011
++and an embedded carriage return is displayed as
++.Ql #015 .
++Space characters in the command name and arguments are also formatted in octal.
++.Pp
+ If a
+ .Ar search expression
+ is specified, it will be used to restrict the IDs that are displayed.
+--- a/include/sudo_compat.h
++++ b/include/sudo_compat.h
+@@ -79,6 +79,12 @@
+ # endif
+ #endif
+
++#ifdef HAVE_FALLTHROUGH_ATTRIBUTE
++# define FALLTHROUGH __attribute__((__fallthrough__))
++#else
++# define FALLTHROUGH do { } while (0)
++#endif
++
+ /*
+ * Given the pointer x to the member m of the struct s, return
+ * a pointer to the containing structure.
+--- a/include/sudo_lbuf.h
++++ b/include/sudo_lbuf.h
+@@ -36,9 +36,15 @@ struct sudo_lbuf {
+
+ typedef int (*sudo_lbuf_output_t)(const char *);
+
++/* Flags for sudo_lbuf_append_esc() */
++#define LBUF_ESC_CNTRL 0x01
++#define LBUF_ESC_BLANK 0x02
++#define LBUF_ESC_QUOTE 0x04
++
+ __dso_public void sudo_lbuf_init_v1(struct sudo_lbuf *lbuf, sudo_lbuf_output_t output, int indent, const char *continuation, int cols);
+ __dso_public void sudo_lbuf_destroy_v1(struct sudo_lbuf *lbuf);
+ __dso_public bool sudo_lbuf_append_v1(struct sudo_lbuf *lbuf, const char *fmt, ...) __printflike(2, 3);
++__dso_public bool sudo_lbuf_append_esc_v1(struct sudo_lbuf *lbuf, int flags, const char *fmt, ...) __printflike(3, 4);
+ __dso_public bool sudo_lbuf_append_quoted_v1(struct sudo_lbuf *lbuf, const char *set, const char *fmt, ...) __printflike(3, 4);
+ __dso_public void sudo_lbuf_print_v1(struct sudo_lbuf *lbuf);
+ __dso_public bool sudo_lbuf_error_v1(struct sudo_lbuf *lbuf);
+@@ -47,6 +53,7 @@ __dso_public void sudo_lbuf_clearerr_v1(
+ #define sudo_lbuf_init(_a, _b, _c, _d, _e) sudo_lbuf_init_v1((_a), (_b), (_c), (_d), (_e))
+ #define sudo_lbuf_destroy(_a) sudo_lbuf_destroy_v1((_a))
+ #define sudo_lbuf_append sudo_lbuf_append_v1
++#define sudo_lbuf_append_esc sudo_lbuf_append_esc_v1
+ #define sudo_lbuf_append_quoted sudo_lbuf_append_quoted_v1
+ #define sudo_lbuf_print(_a) sudo_lbuf_print_v1((_a))
+ #define sudo_lbuf_error(_a) sudo_lbuf_error_v1((_a))
+--- a/lib/util/lbuf.c
++++ b/lib/util/lbuf.c
+@@ -93,6 +93,112 @@ sudo_lbuf_expand(struct sudo_lbuf *lbuf,
+ }
+
+ /*
++ * Escape a character in octal form (#0n) and store it as a string
++ * in buf, which must have at least 6 bytes available.
++ * Returns the length of buf, not counting the terminating NUL byte.
++ */
++static int
++escape(unsigned char ch, char *buf)
++{
++ const int len = ch < 0100 ? (ch < 010 ? 3 : 4) : 5;
++
++ /* Work backwards from the least significant digit to most significant. */
++ switch (len) {
++ case 5:
++ buf[4] = (ch & 7) + '0';
++ ch >>= 3;
++ FALLTHROUGH;
++ case 4:
++ buf[3] = (ch & 7) + '0';
++ ch >>= 3;
++ FALLTHROUGH;
++ case 3:
++ buf[2] = (ch & 7) + '0';
++ buf[1] = '0';
++ buf[0] = '#';
++ break;
++ }
++ buf[len] = '\0';
++
++ return len;
++}
++
++/*
++ * Parse the format and append strings, only %s and %% escapes are supported.
++ * Any non-printable characters are escaped in octal as #0nn.
++ */
++bool
++sudo_lbuf_append_esc_v1(struct sudo_lbuf *lbuf, int flags, const char *fmt, ...)
++{
++ unsigned int saved_len = lbuf->len;
++ bool ret = false;
++ const char *s;
++ va_list ap;
++ debug_decl(sudo_lbuf_append_esc, SUDO_DEBUG_UTIL);
++
++ if (sudo_lbuf_error(lbuf))
++ debug_return_bool(false);
++
++#define should_escape(ch) \
++ ((ISSET(flags, LBUF_ESC_CNTRL) && iscntrl((unsigned char)ch)) || \
++ (ISSET(flags, LBUF_ESC_BLANK) && isblank((unsigned char)ch)))
++#define should_quote(ch) \
++ (ISSET(flags, LBUF_ESC_QUOTE) && (ch == '\'' || ch == '\\'))
++
++ va_start(ap, fmt);
++ while (*fmt != '\0') {
++ if (fmt[0] == '%' && fmt[1] == 's') {
++ if ((s = va_arg(ap, char *)) == NULL)
++ s = "(NULL)";
++ while (*s != '\0') {
++ if (should_escape(*s)) {
++ if (!sudo_lbuf_expand(lbuf, sizeof("#0177") - 1))
++ goto done;
++ lbuf->len += escape(*s++, lbuf->buf + lbuf->len);
++ continue;
++ }
++ if (should_quote(*s)) {
++ if (!sudo_lbuf_expand(lbuf, 2))
++ goto done;
++ lbuf->buf[lbuf->len++] = '\\';
++ lbuf->buf[lbuf->len++] = *s++;
++ continue;
++ }
++ if (!sudo_lbuf_expand(lbuf, 1))
++ goto done;
++ lbuf->buf[lbuf->len++] = *s++;
++ }
++ fmt += 2;
++ continue;
++ }
++ if (should_escape(*fmt)) {
++ if (!sudo_lbuf_expand(lbuf, sizeof("#0177") - 1))
++ goto done;
++ if (*fmt == '\'') {
++ lbuf->buf[lbuf->len++] = '\\';
++ lbuf->buf[lbuf->len++] = *fmt++;
++ } else {
++ lbuf->len += escape(*fmt++, lbuf->buf + lbuf->len);
++ }
++ continue;
++ }
++ if (!sudo_lbuf_expand(lbuf, 1))
++ goto done;
++ lbuf->buf[lbuf->len++] = *fmt++;
++ }
++ ret = true;
++
++done:
++ if (!ret)
++ lbuf->len = saved_len;
++ if (lbuf->size != 0)
++ lbuf->buf[lbuf->len] = '\0';
++ va_end(ap);
++
++ debug_return_bool(ret);
++}
++
++/*
+ * Parse the format and append strings, only %s and %% escapes are supported.
+ * Any characters in set are quoted with a backslash.
+ */
+--- a/lib/util/util.exp.in
++++ b/lib/util/util.exp.in
+@@ -79,6 +79,7 @@ sudo_gethostname_v1
+ sudo_gettime_awake_v1
+ sudo_gettime_mono_v1
+ sudo_gettime_real_v1
++sudo_lbuf_append_esc_v1
+ sudo_lbuf_append_quoted_v1
+ sudo_lbuf_append_v1
+ sudo_lbuf_clearerr_v1
+--- a/plugins/sudoers/logging.c
++++ b/plugins/sudoers/logging.c
+@@ -58,6 +58,7 @@
+ #include <syslog.h>
+
+ #include "sudoers.h"
++#include "sudo_lbuf.h"
+
+ #ifndef HAVE_GETADDRINFO
+ # include "compat/getaddrinfo.h"
+@@ -940,14 +941,6 @@ should_mail(int status)
+ (def_mail_no_perms && !ISSET(status, VALIDATE_SUCCESS)));
+ }
+
+-#define LL_TTY_STR "TTY="
+-#define LL_CWD_STR "PWD=" /* XXX - should be CWD= */
+-#define LL_USER_STR "USER="
+-#define LL_GROUP_STR "GROUP="
+-#define LL_ENV_STR "ENV="
+-#define LL_CMND_STR "COMMAND="
+-#define LL_TSID_STR "TSID="
+-
+ #define IS_SESSID(s) ( \
+ isalnum((unsigned char)(s)[0]) && isalnum((unsigned char)(s)[1]) && \
+ (s)[2] == '/' && \
+@@ -962,14 +955,16 @@ should_mail(int status)
+ static char *
+ new_logline(const char *message, const char *errstr)
+ {
+- char *line = NULL, *evstr = NULL;
+ #ifndef SUDOERS_NO_SEQ
+ char sessid[7];
+ #endif
+ const char *tsid = NULL;
+- size_t len = 0;
++ struct sudo_lbuf lbuf;
++ int i;
+ debug_decl(new_logline, SUDOERS_DEBUG_LOGGING)
+
++ sudo_lbuf_init(&lbuf, NULL, 0, NULL, 0);
++
+ #ifndef SUDOERS_NO_SEQ
+ /* A TSID may be a sudoers-style session ID or a free-form string. */
+ if (sudo_user.iolog_file != NULL) {
+@@ -989,119 +984,55 @@ new_logline(const char *message, const c
+ #endif
+
+ /*
+- * Compute line length
++ * Format the log line as an lbuf, escaping control characters in
++ * octal form (#0nn). Error checking (ENOMEM) is done at the end.
+ */
+- if (message != NULL)
+- len += strlen(message) + 3;
+- if (errstr != NULL)
+- len += strlen(errstr) + 3;
+- len += sizeof(LL_TTY_STR) + 2 + strlen(user_tty);
+- len += sizeof(LL_CWD_STR) + 2 + strlen(user_cwd);
+- if (runas_pw != NULL)
+- len += sizeof(LL_USER_STR) + 2 + strlen(runas_pw->pw_name);
+- if (runas_gr != NULL)
+- len += sizeof(LL_GROUP_STR) + 2 + strlen(runas_gr->gr_name);
+- if (tsid != NULL)
+- len += sizeof(LL_TSID_STR) + 2 + strlen(tsid);
+- if (sudo_user.env_vars != NULL) {
+- size_t evlen = 0;
+- char * const *ep;
+-
+- for (ep = sudo_user.env_vars; *ep != NULL; ep++)
+- evlen += strlen(*ep) + 1;
+- if (evlen != 0) {
+- if ((evstr = malloc(evlen)) == NULL)
+- goto oom;
+- evstr[0] = '\0';
+- for (ep = sudo_user.env_vars; *ep != NULL; ep++) {
+- strlcat(evstr, *ep, evlen);
+- strlcat(evstr, " ", evlen); /* NOTE: last one will fail */
+- }
+- len += sizeof(LL_ENV_STR) + 2 + evlen;
+- }
+- }
+- if (user_cmnd != NULL) {
+- /* Note: we log "sudo -l command arg ..." as "list command arg ..." */
+- len += sizeof(LL_CMND_STR) - 1 + strlen(user_cmnd);
+- if (ISSET(sudo_mode, MODE_CHECK))
+- len += sizeof("list ") - 1;
+- if (user_args != NULL)
+- len += strlen(user_args) + 1;
+- }
+-
+- /*
+- * Allocate and build up the line.
+- */
+- if ((line = malloc(++len)) == NULL)
+- goto oom;
+- line[0] = '\0';
+
+ if (message != NULL) {
+- if (strlcat(line, message, len) >= len ||
+- strlcat(line, errstr ? " : " : " ; ", len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "%s%s", message,
++ errstr ? " : " : " ; ");
+ }
+ if (errstr != NULL) {
+- if (strlcat(line, errstr, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
+- }
+- if (strlcat(line, LL_TTY_STR, len) >= len ||
+- strlcat(line, user_tty, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
+- if (strlcat(line, LL_CWD_STR, len) >= len ||
+- strlcat(line, user_cwd, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "%s ; ", errstr);
++ }
++ if (user_tty != NULL) {
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "TTY=%s ; ", user_tty);
++ }
++ if (user_cwd != NULL) {
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "PWD=%s ; ", user_cwd);
++ }
+ if (runas_pw != NULL) {
+- if (strlcat(line, LL_USER_STR, len) >= len ||
+- strlcat(line, runas_pw->pw_name, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "USER=%s ; ",
++ runas_pw->pw_name);
+ }
+ if (runas_gr != NULL) {
+- if (strlcat(line, LL_GROUP_STR, len) >= len ||
+- strlcat(line, runas_gr->gr_name, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "GROUP=%s ; ",
++ runas_gr->gr_name);
+ }
+ if (tsid != NULL) {
+- if (strlcat(line, LL_TSID_STR, len) >= len ||
+- strlcat(line, tsid, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
+- }
+- if (evstr != NULL) {
+- if (strlcat(line, LL_ENV_STR, len) >= len ||
+- strlcat(line, evstr, len) >= len ||
+- strlcat(line, " ; ", len) >= len)
+- goto toobig;
+- free(evstr);
+- evstr = NULL;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "TSID=%s ; ", tsid);
++ }
++ if (sudo_user.env_vars != NULL) {
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, "ENV=%s", sudo_user.env_vars[0]);
++ for (i = 1; sudo_user.env_vars[i] != NULL; i++) {
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, " %s",
++ sudo_user.env_vars[i]);
++ }
+ }
+ if (user_cmnd != NULL) {
+- if (strlcat(line, LL_CMND_STR, len) >= len)
+- goto toobig;
+- if (ISSET(sudo_mode, MODE_CHECK) && strlcat(line, "list ", len) >= len)
+- goto toobig;
+- if (strlcat(line, user_cmnd, len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL|LBUF_ESC_BLANK,
++ "COMMAND=%s", user_cmnd);
+ if (user_args != NULL) {
+- if (strlcat(line, " ", len) >= len ||
+- strlcat(line, user_args, len) >= len)
+- goto toobig;
++ sudo_lbuf_append_esc(&lbuf,
++ LBUF_ESC_CNTRL|LBUF_ESC_QUOTE,
++ " %s", user_args);
+ }
+ }
+
+- debug_return_str(line);
+-oom:
+- free(evstr);
++ if (!sudo_lbuf_error(&lbuf))
++ debug_return_str(lbuf.buf);
++
++ sudo_lbuf_destroy(&lbuf);
+ sudo_warnx(U_("%s: %s"), __func__, U_("unable to allocate memory"));
+ debug_return_str(NULL);
+-toobig:
+- free(evstr);
+- free(line);
+- sudo_warnx(U_("internal error, %s overflow"), __func__);
+- debug_return_str(NULL);
+ }
+--- a/plugins/sudoers/sudoreplay.c
++++ b/plugins/sudoers/sudoreplay.c
+@@ -71,6 +71,7 @@
+ #include "sudo_conf.h"
+ #include "sudo_debug.h"
+ #include "sudo_event.h"
++#include "sudo_lbuf.h"
+ #include "sudo_util.h"
+
+ #ifdef HAVE_GETOPT_LONG
+@@ -1353,7 +1354,8 @@ match_expr(struct search_node_list *head
+ }
+
+ static int
+-list_session(char *logfile, regex_t *re, const char *user, const char *tty)
++list_session(struct sudo_lbuf *lbuf, char *logfile, regex_t *re,
++ const char *user, const char *tty)
+ {
+ char idbuf[7], *idstr, *cp;
+ const char *timestr;
+@@ -1386,16 +1388,32 @@ list_session(char *logfile, regex_t *re,
+ }
+ /* XXX - print rows + cols? */
+ timestr = get_timestr(li->tstamp, 1);
+- printf("%s : %s : TTY=%s ; CWD=%s ; USER=%s ; ",
+- timestr ? timestr : "invalid date",
+- li->user, li->tty, li->cwd, li->runas_user);
+- if (li->runas_group)
+- printf("GROUP=%s ; ", li->runas_group);
+- printf("TSID=%s ; COMMAND=%s\n", idstr, li->cmd);
+-
+- ret = 0;
+-
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "%s : %s : ",
++ timestr ? timestr : "invalid date", li->user);
++ if (li->tty != NULL) {
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "TTY=%s ; ",
++ li->tty);
++ }
++ if (li->cwd != NULL) {
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "CWD=%s ; ",
++ li->cwd);
++ }
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "USER=%s ; ", li->runas_user);
++ if (li->runas_group != NULL) {
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "GROUP=%s ; ",
++ li->runas_group);
++ }
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "TSID=%s ; ", idstr);
++ sudo_lbuf_append_esc(lbuf, LBUF_ESC_CNTRL, "COMMAND=%s",
++ li->cmd);
++
++ if (!sudo_lbuf_error(lbuf)) {
++ puts(lbuf->buf);
++ ret = 0;
++ }
+ done:
++ lbuf->error = 0;
++ lbuf->len = 0;
+ free_log_info(li);
+ debug_return_int(ret);
+ }
+@@ -1415,6 +1433,7 @@ find_sessions(const char *dir, regex_t *
+ DIR *d;
+ struct dirent *dp;
+ struct stat sb;
++ struct sudo_lbuf lbuf;
+ size_t sdlen, sessions_len = 0, sessions_size = 0;
+ unsigned int i;
+ int len;
+@@ -1426,6 +1445,8 @@ find_sessions(const char *dir, regex_t *
+ #endif
+ debug_decl(find_sessions, SUDO_DEBUG_UTIL)
+
++ sudo_lbuf_init(&lbuf, NULL, 0, NULL, 0);
++
+ d = opendir(dir);
+ if (d == NULL)
+ sudo_fatal(U_("unable to open %s"), dir);
+@@ -1485,7 +1506,7 @@ find_sessions(const char *dir, regex_t *
+
+ /* Check for dir with a log file. */
+ if (lstat(pathbuf, &sb) == 0 && S_ISREG(sb.st_mode)) {
+- list_session(pathbuf, re, user, tty);
++ list_session(&lbuf, pathbuf, re, user, tty);
+ } else {
+ /* Strip off "/log" and recurse if a dir. */
+ pathbuf[sdlen + len - 4] = '\0';
+@@ -1496,6 +1517,7 @@ find_sessions(const char *dir, regex_t *
+ }
+ free(sessions);
+ }
++ sudo_lbuf_destroy(&lbuf);
+
+ debug_return_int(0);
+ }
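
A small standalone version of the "#0nn" escaping this patch adds to sudo's log formatting, so terminal control sequences in user-supplied fields never reach the log viewer unmodified. This is a sketch of the formatting rule only, not sudo's escape() helper.

    #include <ctype.h>
    #include <stdio.h>

    static void log_escaped(const char *s)
    {
        for (; *s != '\0'; s++) {
            unsigned char c = (unsigned char)*s;
            if (iscntrl(c))
                printf("#0%o", c);     /* e.g. TAB -> #011, CR -> #015 */
            else
                putchar(c);
        }
        putchar('\n');
    }

    int main(void)
    {
        log_escaped("COMMAND=/bin/echo hi\tthere\r");
        return 0;
    }
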
diff --git a/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-2.patch b/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-2.patch
new file mode 100644
index 0000000000..d021873b70
--- /dev/null
+++ b/meta/recipes-extended/sudo/sudo/CVE-2023-28486_CVE-2023-28487-2.patch
@@ -0,0 +1,26 @@
+Backport of:
+
+From 12648b4e0a8cf486480442efd52f0e0b6cab6e8b Mon Sep 17 00:00:00 2001
+From: "Todd C. Miller" <Todd.Miller@sudo.ws>
+Date: Mon, 13 Mar 2023 08:04:32 -0600
+Subject: [PATCH] Add missing " ; " separator between environment variables and
+ command. This is a regression introduced in sudo 1.9.13. GitHub issue #254.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/sudo/tree/debian/patches/CVE-2023-2848x-2.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/sudo-project/sudo/commit/12648b4e0a8cf486480442efd52f0e0b6cab6e8b]
+CVE: CVE-2023-28486 CVE-2023-28487
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/eventlog/eventlog.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/plugins/sudoers/logging.c
++++ b/plugins/sudoers/logging.c
+@@ -1018,6 +1018,7 @@ new_logline(const char *message, const c
+ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL, " %s",
+ sudo_user.env_vars[i]);
+ }
++ sudo_lbuf_append(&lbuf, " ; ");
+ }
+ if (user_cmnd != NULL) {
+ sudo_lbuf_append_esc(&lbuf, LBUF_ESC_CNTRL|LBUF_ESC_BLANK,
diff --git a/meta/recipes-extended/sudo/sudo_1.8.32.bb b/meta/recipes-extended/sudo/sudo_1.8.32.bb
index 8d16ec2538..e35bbfa789 100644
--- a/meta/recipes-extended/sudo/sudo_1.8.32.bb
+++ b/meta/recipes-extended/sudo/sudo_1.8.32.bb
@@ -4,6 +4,10 @@ SRC_URI = "https://www.sudo.ws/dist/sudo-${PV}.tar.gz \
${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \
file://0001-Include-sys-types.h-for-id_t-definition.patch \
file://0001-Fix-includes-when-building-with-musl.patch \
+ file://CVE-2022-43995.patch \
+ file://CVE-2023-22809.patch \
+ file://CVE-2023-28486_CVE-2023-28487-1.patch \
+ file://CVE-2023-28486_CVE-2023-28487-2.patch \
"
PAM_SRC_URI = "file://sudo.pam"
diff --git a/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch b/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch
new file mode 100644
index 0000000000..972cc8938b
--- /dev/null
+++ b/meta/recipes-extended/sysstat/sysstat/CVE-2022-39377.patch
@@ -0,0 +1,92 @@
+From 9c4eaf150662ad40607923389d4519bc83b93540 Mon Sep 17 00:00:00 2001
+From: Sebastien <seb@fedora-2.home>
+Date: Sat, 15 Oct 2022 14:24:22 +0200
+Subject: [PATCH] Fix size_t overflow in sa_common.c (GHSL-2022-074)
+
+allocate_structures function located in sa_common.c insufficiently
+checks bounds before arithmetic multiplication allowing for an
+overflow in the size allocated for the buffer representing system
+activities.
+
+This patch checks that the post-multiplied value is not greater than
+UINT_MAX.
+
+Signed-off-by: Sebastien <seb@fedora-2.home>
+
+Upstream-Status: Backport [https://github.com/sysstat/sysstat/commit/9c4eaf150662ad40607923389d4519bc83b93540]
+CVE: CVE-2022-39377
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ common.c | 25 +++++++++++++++++++++++++
+ common.h | 2 ++
+ sa_common.c | 6 ++++++
+ 3 files changed, 33 insertions(+)
+
+diff --git a/common.c b/common.c
+index ddfe75d..28d475e 100644
+--- a/common.c
++++ b/common.c
+@@ -1528,4 +1528,29 @@ int parse_values(char *strargv, unsigned char bitmap[], int max_val, const char
+
+ return 0;
+ }
++
++/*
++ ***************************************************************************
++ * Check if the multiplication of the 3 values may be greater than UINT_MAX.
++ *
++ * IN:
++ * @val1 First value.
++ * @val2 Second value.
++ * @val3 Third value.
++ ***************************************************************************
++ */
++void check_overflow(size_t val1, size_t val2, size_t val3)
++{
++ if ((unsigned long long) val1 *
++ (unsigned long long) val2 *
++ (unsigned long long) val3 > UINT_MAX) {
++#ifdef DEBUG
++ fprintf(stderr, "%s: Overflow detected (%llu). Aborting...\n",
++ __FUNCTION__,
++ (unsigned long long) val1 * (unsigned long long) val2 * (unsigned long long) val3);
++#endif
++ exit(4);
++ }
++}
++
+ #endif /* SOURCE_SADC undefined */
+diff --git a/common.h b/common.h
+index 86905ba..75f837a 100644
+--- a/common.h
++++ b/common.h
+@@ -249,6 +249,8 @@ int get_wwnid_from_pretty
+ (char *, unsigned long long *, unsigned int *);
+
+ #ifndef SOURCE_SADC
++void check_overflow
++ (size_t, size_t, size_t);
+ int count_bits
+ (void *, int);
+ int count_csvalues
+diff --git a/sa_common.c b/sa_common.c
+index 8a03099..ff90c1f 100644
+--- a/sa_common.c
++++ b/sa_common.c
+@@ -452,7 +452,13 @@ void allocate_structures(struct activity *act[])
+ int i, j;
+
+ for (i = 0; i < NR_ACT; i++) {
++
+ if (act[i]->nr_ini > 0) {
++
++ /* Look for a possible overflow */
++ check_overflow((size_t) act[i]->msize, (size_t) act[i]->nr_ini,
++ (size_t) act[i]->nr2);
++
+ for (j = 0; j < 3; j++) {
+ SREALLOC(act[i]->buf[j], void,
+ (size_t) act[i]->msize * (size_t) act[i]->nr_ini * (size_t) act[i]->nr2);
+--
+2.25.1
+
diff --git a/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch b/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch
new file mode 100644
index 0000000000..9a27945a8b
--- /dev/null
+++ b/meta/recipes-extended/sysstat/sysstat/CVE-2023-33204.patch
@@ -0,0 +1,46 @@
+Origin: https://github.com/opencontainers/runc/commit/6f8dc568e6ab072bb8205b732f04e685bf9237c0
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-02-18
+
+From 954ff2e2673cef48f0ed44668c466eab041db387 Mon Sep 17 00:00:00 2001
+From: Pavel Kopylov <pkopylov@cloudlinux.com>
+Date: Wed, 17 May 2023 11:33:45 +0200
+Subject: [PATCH] Fix an overflow which is still possible for some values.
+
+CVE: CVE-2023-33204
+Upstream-Status: Backport [ upstream: https://github.com/sysstat/sysstat/commit/6f8dc568e6ab072bb8205b732f04e685bf9237c0
+debian: http://security.debian.org/debian-security/pool/updates/main/s/sysstat/sysstat_12.0.3-2+deb10u2.debian.tar.xz ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+---
+ common.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+Index: sysstat-12.0.3/common.c
+===================================================================
+--- sysstat-12.0.3.orig/common.c
++++ sysstat-12.0.3/common.c
+@@ -1449,15 +1449,16 @@ int parse_values(char *strargv, unsigned
+ */
+ void check_overflow(size_t val1, size_t val2, size_t val3)
+ {
+- if ((unsigned long long) val1 *
+- (unsigned long long) val2 *
+- (unsigned long long) val3 > UINT_MAX) {
++ if ((val1 != 0) && (val2 != 0) && (val3 != 0) &&
++ (((unsigned long long) UINT_MAX / (unsigned long long) val1 <
++ (unsigned long long) val2) ||
++ ((unsigned long long) UINT_MAX / ((unsigned long long) val1 * (unsigned long long) val2) <
++ (unsigned long long) val3))) {
+ #ifdef DEBUG
+- fprintf(stderr, "%s: Overflow detected (%llu). Aborting...\n",
+- __FUNCTION__,
+- (unsigned long long) val1 * (unsigned long long) val2 * (unsigned long long) val3);
++ fprintf(stderr, "%s: Overflow detected (%u,%u,%u). Aborting...\n",
++ __FUNCTION__, val1, val2, val3);
+ #endif
+- exit(4);
++ exit(4);
+ }
+ }
+
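
Note on the technique above: the reworked check never forms the product at all, because three size_t factors can need more than 64 bits, so even the earlier unsigned long long comparison against UINT_MAX could itself wrap. The division form tests UINT_MAX / val1 < val2 first, then UINT_MAX / (val1 * val2) < val3. A minimal standalone sketch of the same idea follows; the function and test values are illustrative, not sysstat code.

    #include <limits.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Return 1 if a * b * c would exceed UINT_MAX, using only divisions so the
     * test itself cannot overflow. */
    static int would_overflow_uint(size_t a, size_t b, size_t c)
    {
        if (a == 0 || b == 0 || c == 0)
            return 0;                        /* product is 0, always fits */
        if ((size_t) UINT_MAX / a < b)
            return 1;                        /* a * b already too large */
        if ((size_t) UINT_MAX / (a * b) < c)
            return 1;                        /* a * b fits, a * b * c does not */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", would_overflow_uint(70000, 70000, 1)); /* product exceeds UINT_MAX */
        printf("%d\n", would_overflow_uint(10, 20, 30));      /* fits comfortably */
        return 0;
    }
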
diff --git a/meta/recipes-extended/sysstat/sysstat_12.2.1.bb b/meta/recipes-extended/sysstat/sysstat_12.2.1.bb
index 2a90f89d25..ac7b898db9 100644
--- a/meta/recipes-extended/sysstat/sysstat_12.2.1.bb
+++ b/meta/recipes-extended/sysstat/sysstat_12.2.1.bb
@@ -2,7 +2,10 @@ require sysstat.inc
LIC_FILES_CHKSUM = "file://COPYING;md5=a23a74b3f4caf9616230789d94217acb"
-SRC_URI += "file://0001-configure.in-remove-check-for-chkconfig.patch"
+SRC_URI += "file://0001-configure.in-remove-check-for-chkconfig.patch \
+ file://CVE-2022-39377.patch \
+ file://CVE-2023-33204.patch \
+ "
SRC_URI[md5sum] = "9dfff5fac24e35bd92fb7896debf2ffb"
SRC_URI[sha256sum] = "8edb0e19b514ac560a098a02933a4735b881296d61014db89bf80f05dd7a4732"
diff --git a/meta/recipes-extended/tar/tar/CVE-2022-48303.patch b/meta/recipes-extended/tar/tar/CVE-2022-48303.patch
new file mode 100644
index 0000000000..b2f40f3e64
--- /dev/null
+++ b/meta/recipes-extended/tar/tar/CVE-2022-48303.patch
@@ -0,0 +1,43 @@
+From 3da78400eafcccb97e2f2fd4b227ea40d794ede8 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Sat, 11 Feb 2023 11:57:39 +0200
+Subject: Fix boundary checking in base-256 decoder
+
+* src/list.c (from_header): Base-256 encoding is at least 2 bytes
+long.
+
+Upstream-Status: Backport [see reference below]
+CVE: CVE-2022-48303
+
+Reference to upstream patch:
+https://savannah.gnu.org/bugs/?62387
+https://git.savannah.gnu.org/cgit/tar.git/patch/src/list.c?id=3da78400eafcccb97e2f2fd4b227ea40d794ede8
+
+Signed-off-by: Rodolfo Quesada Zumbado <rodolfo.zumbado@windriver.com>
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+---
+ src/list.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+
+(limited to 'src/list.c')
+
+diff --git a/src/list.c b/src/list.c
+index 9fafc42..86bcfdd 100644
+--- a/src/list.c
++++ b/src/list.c
+@@ -881,8 +881,9 @@ from_header (char const *where0, size_t digs, char const *type,
+ where++;
+ }
+ }
+- else if (*where == '\200' /* positive base-256 */
+- || *where == '\377' /* negative base-256 */)
++ else if (where <= lim - 2
++ && (*where == '\200' /* positive base-256 */
++ || *where == '\377' /* negative base-256 */))
+ {
+ /* Parse base-256 output. A nonnegative number N is
+ represented as (256**DIGS)/2 + N; a negative number -N is
+--
+cgit v1.1
+
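
The tar fix above refuses to treat '\200' or '\377' as a base-256 marker unless at least two bytes remain before the field limit. A simplified sketch of that boundary rule is below; it is a hypothetical helper, not tar's from_header(), it only handles the positive marker, and it assumes the leading byte carries no value bits and at most 8 value bytes follow.

    #include <stddef.h>

    /* Parse a positive base-256 field only when marker + at least one value
     * byte remain, the same bound the patch adds with `where <= lim - 2`. */
    static int parse_base256_positive(const unsigned char *p, const unsigned char *lim,
                                      unsigned long long *out)
    {
        if ((size_t)(lim - p) < 2)
            return 0;                   /* too short: a marker alone is not a number */
        if (p[0] != 0x80)
            return 0;                   /* only the positive marker in this sketch */
        unsigned long long v = 0;
        for (const unsigned char *q = p + 1; q < lim; q++)
            v = (v << 8) | *q;          /* big-endian value bytes */
        *out = v;
        return 1;
    }
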
diff --git a/meta/recipes-extended/tar/tar/CVE-2023-39804.patch b/meta/recipes-extended/tar/tar/CVE-2023-39804.patch
new file mode 100644
index 0000000000..f550928540
--- /dev/null
+++ b/meta/recipes-extended/tar/tar/CVE-2023-39804.patch
@@ -0,0 +1,64 @@
+From a339f05cd269013fa133d2f148d73f6f7d4247e4 Mon Sep 17 00:00:00 2001
+From: Sergey Poznyakoff <gray@gnu.org>
+Date: Sat, 28 Aug 2021 16:02:12 +0300
+Subject: Fix handling of extended header prefixes
+
+* src/xheader.c (locate_handler): Recognize prefix keywords only
+when followed by a dot.
+(xattr_decoder): Use xmalloc/xstrdup instead of alloc
+
+Upstream-Status: Backport [https://git.savannah.gnu.org/cgit/tar.git/commit/?id=a339f05cd269013fa133d2f148d73f6f7d4247e4]
+CVE: CVE-2023-39804
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/xheader.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/src/xheader.c b/src/xheader.c
+index 4f8b2b2..3cd694d 100644
+--- a/src/xheader.c
++++ b/src/xheader.c
+@@ -637,11 +637,11 @@ static struct xhdr_tab const *
+ locate_handler (char const *keyword)
+ {
+ struct xhdr_tab const *p;
+-
+ for (p = xhdr_tab; p->keyword; p++)
+ if (p->prefix)
+ {
+- if (strncmp (p->keyword, keyword, strlen(p->keyword)) == 0)
++ size_t kwlen = strlen (p->keyword);
++ if (keyword[kwlen] == '.' && strncmp (p->keyword, keyword, kwlen) == 0)
+ return p;
+ }
+ else
+@@ -1716,19 +1716,20 @@ xattr_decoder (struct tar_stat_info *st,
+ char const *keyword, char const *arg, size_t size)
+ {
+ char *xstr, *xkey;
+-
++
+ /* copy keyword */
+- size_t klen_raw = strlen (keyword);
+- xkey = alloca (klen_raw + 1);
+- memcpy (xkey, keyword, klen_raw + 1) /* including null-terminating */;
++ xkey = xstrdup (keyword);
+
+ /* copy value */
+- xstr = alloca (size + 1);
++ xstr = xmalloc (size + 1);
+ memcpy (xstr, arg, size + 1); /* separator included, for GNU tar '\n' */;
+
+ xattr_decode_keyword (xkey);
+
+- xheader_xattr_add (st, xkey + strlen("SCHILY.xattr."), xstr, size);
++ xheader_xattr_add (st, xkey + strlen ("SCHILY.xattr."), xstr, size);
++
++ free (xkey);
++ free (xstr);
+ }
+
+ static void
+--
+cgit v1.1
+
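
The xattr_decoder change above swaps alloca() for heap allocation because an attacker-controlled size can otherwise blow the stack. The sketch below shows the general pattern under the same assumption; the helper name is hypothetical and it stands in for tar's xmalloc()-based copies, which abort on failure rather than returning NULL.

    #include <stdlib.h>
    #include <string.h>

    /* Copy a length-delimited value into a NUL-terminated heap buffer; a huge
     * attacker-supplied `size` then fails cleanly in malloc() instead of
     * growing the stack as alloca() would. */
    static char *copy_value_heap(const char *arg, size_t size)
    {
        if (size == (size_t) -1)
            return NULL;                 /* size + 1 would wrap */
        char *buf = malloc(size + 1);
        if (!buf)
            return NULL;
        memcpy(buf, arg, size);
        buf[size] = '\0';
        return buf;                      /* caller frees, mirroring the free() calls the patch adds */
    }
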
diff --git a/meta/recipes-extended/tar/tar_1.32.bb b/meta/recipes-extended/tar/tar_1.32.bb
index db1540dbd6..9297480e85 100644
--- a/meta/recipes-extended/tar/tar_1.32.bb
+++ b/meta/recipes-extended/tar/tar_1.32.bb
@@ -6,9 +6,13 @@ SECTION = "base"
LICENSE = "GPLv3"
LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
+PR = "r1"
+
SRC_URI = "${GNU_MIRROR}/tar/tar-${PV}.tar.bz2 \
file://musl_dirent.patch \
file://CVE-2021-20193.patch \
+ file://CVE-2022-48303.patch \
+ file://CVE-2023-39804.patch \
"
SRC_URI[md5sum] = "17917356fff5cb4bd3cd5a6c3e727b05"
diff --git a/meta/recipes-extended/timezone/timezone.inc b/meta/recipes-extended/timezone/timezone.inc
index cdd1a2ac3c..46bc1b794e 100644
--- a/meta/recipes-extended/timezone/timezone.inc
+++ b/meta/recipes-extended/timezone/timezone.inc
@@ -6,7 +6,7 @@ SECTION = "base"
LICENSE = "PD & BSD-3-Clause"
LIC_FILES_CHKSUM = "file://LICENSE;md5=c679c9d6b02bc2757b3eaf8f53c43fba"
-PV = "2022a"
+PV = "2024a"
SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz;name=tzcode \
http://www.iana.org/time-zones/repository/releases/tzdata${PV}.tar.gz;name=tzdata \
@@ -14,6 +14,5 @@ SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz
UPSTREAM_CHECK_URI = "http://www.iana.org/time-zones"
-SRC_URI[tzcode.sha256sum] = "f8575e7e33be9ee265df2081092526b81c80abac3f4a04399ae9d4d91cdadac7"
-SRC_URI[tzdata.sha256sum] = "ef7fffd9f4f50f4f58328b35022a32a5a056b245c5cb3d6791dddb342f871664"
-
+SRC_URI[tzcode.sha256sum] = "80072894adff5a458f1d143e16e4ca1d8b2a122c9c5399da482cb68cba6a1ff8"
+SRC_URI[tzdata.sha256sum] = "0d0434459acbd2059a7a8da1f3304a84a86591f6ed69c6248fffa502b6edffe3"
diff --git a/meta/recipes-extended/unzip/unzip/CVE-2021-4217.patch b/meta/recipes-extended/unzip/unzip/CVE-2021-4217.patch
new file mode 100644
index 0000000000..6ba2b879a3
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/CVE-2021-4217.patch
@@ -0,0 +1,67 @@
+From 731d698377dbd1f5b1b90efeb8094602ed59fc40 Mon Sep 17 00:00:00 2001
+From: Nils Bars <nils.bars@t-online.de>
+Date: Mon, 17 Jan 2022 16:53:16 +0000
+Subject: [PATCH] Fix null pointer dereference and use of uninitialized data
+
+This fixes a bug that causes use of uninitialized heap data if `readbuf` fails
+to read as many bytes as indicated by the extra field length attribute.
+Furthermore, this fixes a null pointer dereference if an archive contains an
+`EF_UNIPATH` extra field but does not have a filename set.
+---
+ fileio.c | 5 ++++-
+ process.c | 6 +++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+---
+
+Patch from:
+https://bugs.launchpad.net/ubuntu/+source/unzip/+bug/1957077
+https://launchpadlibrarian.net/580782282/0001-Fix-null-pointer-dereference-and-use-of-uninitialized-data.patch
+Regenerated to apply without offsets.
+
+CVE: CVE-2021-4217
+
+Upstream-Status: Pending [infozip upstream inactive]
+
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+
+
+diff --git a/fileio.c b/fileio.c
+index 14460f3..1dc319e 100644
+--- a/fileio.c
++++ b/fileio.c
+@@ -2301,8 +2301,11 @@ int do_string(__G__ length, option) /* return PK-type error code */
+ seek_zipf(__G__ G.cur_zipfile_bufstart - G.extra_bytes +
+ (G.inptr-G.inbuf) + length);
+ } else {
+- if (readbuf(__G__ (char *)G.extra_field, length) == 0)
++ unsigned bytes_read = readbuf(__G__ (char *)G.extra_field, length);
++ if (bytes_read == 0)
+ return PK_EOF;
++ if (bytes_read != length)
++ return PK_ERR;
+ /* Looks like here is where extra fields are read */
+ if (getZip64Data(__G__ G.extra_field, length) != PK_COOL)
+ {
+diff --git a/process.c b/process.c
+index 5f8f6c6..de843a5 100644
+--- a/process.c
++++ b/process.c
+@@ -2058,10 +2058,14 @@ int getUnicodeData(__G__ ef_buf, ef_len)
+ G.unipath_checksum = makelong(offset + ef_buf);
+ offset += 4;
+
++ if (!G.filename_full) {
++ /* Check if we have a unicode extra section but no filename set */
++ return PK_ERR;
++ }
++
+ /*
+ * Compute 32-bit crc
+ */
+-
+ chksum = crc32(chksum, (uch *)(G.filename_full),
+ strlen(G.filename_full));
+
+--
+2.32.0
+
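
The unzip fix above distinguishes "read nothing" (EOF) from "read fewer bytes than the extra-field length claimed", so later parsers never see uninitialized tail bytes. A hedged standalone version of that idiom, using stdio instead of unzip's readbuf() and PK_* codes:

    #include <stdio.h>
    #include <stddef.h>

    /* Read exactly `want` bytes or report failure, instead of trusting a
     * partial read. Return 0 on success, -1 on EOF, -2 on a short read. */
    static int read_exact(FILE *fp, unsigned char *dst, size_t want)
    {
        size_t got = fread(dst, 1, want, fp);
        if (got == 0)
            return -1;      /* nothing at all: treat as end of file */
        if (got != want)
            return -2;      /* short read: the tail would be uninitialized */
        return 0;
    }
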
diff --git a/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch b/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch
new file mode 100644
index 0000000000..1c1e120deb
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/CVE-2022-0529.patch
@@ -0,0 +1,39 @@
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1010355
+
+CVE: CVE-2022-0529
+Upstream-Status: Inactive-Upstream [need a new release]
+
+diff --git a/process.c b/process.c
+index d2a846e..99b9c7b 100644
+--- a/process.c
++++ b/process.c
+@@ -2507,13 +2507,15 @@ char *wide_to_local_string(wide_string, escape_all)
+ char buf[9];
+ char *buffer = NULL;
+ char *local_string = NULL;
++ size_t buffer_size;
+
+ for (wsize = 0; wide_string[wsize]; wsize++) ;
+
+ if (max_bytes < MAX_ESCAPE_BYTES)
+ max_bytes = MAX_ESCAPE_BYTES;
+
+- if ((buffer = (char *)malloc(wsize * max_bytes + 1)) == NULL) {
++ buffer_size = wsize * max_bytes + 1;
++ if ((buffer = (char *)malloc(buffer_size)) == NULL) {
+ return NULL;
+ }
+
+@@ -2552,7 +2554,11 @@ char *wide_to_local_string(wide_string, escape_all)
+ /* no MB for this wide */
+ /* use escape for wide character */
+ char *escape_string = wide_to_escape_string(wide_string[i]);
+- strcat(buffer, escape_string);
++ size_t buffer_len = strlen(buffer);
++ size_t escape_string_len = strlen(escape_string);
++ if (buffer_len + escape_string_len + 1 > buffer_size)
++ escape_string_len = buffer_size - buffer_len - 1;
++ strncat(buffer, escape_string, escape_string_len);
+ free(escape_string);
+ }
+ }
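
The CVE-2022-0529 change records the destination capacity (buffer_size) and clamps each append so strcat() can no longer run past the allocation. A minimal sketch of that bounded-append pattern follows; the function name is illustrative, and the real fix also keeps the original allocation-size computation.

    #include <stddef.h>
    #include <string.h>

    /* Append src to dst without ever writing past dst[cap - 1]; truncates
     * instead of overflowing, like the strncat() clamp in the patch. */
    static void append_bounded(char *dst, size_t cap, const char *src)
    {
        size_t used = strlen(dst);
        if (used + 1 >= cap)
            return;                        /* no room left, keep dst intact */
        size_t room = cap - used - 1;      /* bytes available before the NUL */
        size_t n = strlen(src);
        if (n > room)
            n = room;
        memcpy(dst + used, src, n);
        dst[used + n] = '\0';
    }
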
diff --git a/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch b/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch
new file mode 100644
index 0000000000..363dafddc9
--- /dev/null
+++ b/meta/recipes-extended/unzip/unzip/CVE-2022-0530.patch
@@ -0,0 +1,33 @@
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1010355
+
+CVE: CVE-2022-0530
+Upstream-Status: Inactive-Upstream [need a new release]
+
+diff --git a/fileio.c b/fileio.c
+index 6290824..77e4b5f 100644
+--- a/fileio.c
++++ b/fileio.c
+@@ -2361,6 +2361,9 @@ int do_string(__G__ length, option) /* return PK-type error code */
+ /* convert UTF-8 to local character set */
+ fn = utf8_to_local_string(G.unipath_filename,
+ G.unicode_escape_all);
++ if (fn == NULL)
++ return PK_ERR;
++
+ /* make sure filename is short enough */
+ if (strlen(fn) >= FILNAMSIZ) {
+ fn[FILNAMSIZ - 1] = '\0';
+diff --git a/process.c b/process.c
+index d2a846e..715bc0f 100644
+--- a/process.c
++++ b/process.c
+@@ -2605,6 +2605,8 @@ char *utf8_to_local_string(utf8_string, escape_all)
+ int escape_all;
+ {
+ zwchar *wide = utf8_to_wide_string(utf8_string);
++ if (wide == NULL)
++ return NULL;
+ char *loc = wide_to_local_string(wide, escape_all);
+ free(wide);
+ return loc;
+
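
The CVE-2022-0530 change makes the conversion chain tolerate failure by checking for NULL before using the intermediate result. A small sketch of that propagation pattern, with function pointers standing in for utf8_to_wide_string() and wide_to_local_string():

    #include <stdlib.h>

    /* Chain two conversions, propagating NULL instead of dereferencing it. */
    static char *convert_chain(const char *in,
                               void *(*step1)(const char *),
                               char *(*step2)(void *))
    {
        void *mid = step1(in);
        if (mid == NULL)
            return NULL;            /* first stage failed: stop here */
        char *out = step2(mid);     /* may itself be NULL; caller must check */
        free(mid);
        return out;
    }
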
diff --git a/meta/recipes-extended/unzip/unzip_6.0.bb b/meta/recipes-extended/unzip/unzip_6.0.bb
index af5530ab38..fa57c8f5bd 100644
--- a/meta/recipes-extended/unzip/unzip_6.0.bb
+++ b/meta/recipes-extended/unzip/unzip_6.0.bb
@@ -26,6 +26,9 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/infozip/UnZip%206.x%20%28latest%29/UnZip%206.0/
file://CVE-2019-13232_p1.patch \
file://CVE-2019-13232_p2.patch \
file://CVE-2019-13232_p3.patch \
+ file://CVE-2021-4217.patch \
+ file://CVE-2022-0529.patch \
+ file://CVE-2022-0530.patch \
"
UPSTREAM_VERSION_UNKNOWN = "1"
diff --git a/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch b/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch
new file mode 100644
index 0000000000..383634ad53
--- /dev/null
+++ b/meta/recipes-extended/xdg-utils/xdg-utils/CVE-2022-4055.patch
@@ -0,0 +1,165 @@
+From f67c4d1f8bd2e3cbcb9eb49f5e897075e7426780 Mon Sep 17 00:00:00 2001
+From: Gabriel Corona <gabriel.corona@enst-bretagne.fr>
+Date: Thu, 25 Aug 2022 23:51:45 +0200
+Subject: [PATCH] Disable special support for Thunderbird in xdg-email (fixes
+ CVE-2020-27748, CVE-2022-4055)
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xdg/xdg-utils/-/commit/f67c4d1f8bd2e3cbcb9eb49f5e897075e7426780]
+CVE: CVE-2022-4055
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ scripts/xdg-email.in | 108 -------------------------------------------
+ 1 file changed, 108 deletions(-)
+
+diff --git a/scripts/xdg-email.in b/scripts/xdg-email.in
+index 13ba2d5..b700679 100644
+--- a/scripts/xdg-email.in
++++ b/scripts/xdg-email.in
+@@ -30,76 +30,8 @@ _USAGE
+
+ #@xdg-utils-common@
+
+-run_thunderbird()
+-{
+- local THUNDERBIRD MAILTO NEWMAILTO TO CC BCC SUBJECT BODY
+- THUNDERBIRD="$1"
+- MAILTO=$(echo "$2" | sed 's/^mailto://')
+- echo "$MAILTO" | grep -qs "^?"
+- if [ "$?" = "0" ] ; then
+- MAILTO=$(echo "$MAILTO" | sed 's/^?//')
+- else
+- MAILTO=$(echo "$MAILTO" | sed 's/^/to=/' | sed 's/?/\&/')
+- fi
+-
+- MAILTO=$(echo "$MAILTO" | sed 's/&/\n/g')
+- TO=$(/bin/echo -e $(echo "$MAILTO" | grep '^to=' | sed 's/^to=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- CC=$(/bin/echo -e $(echo "$MAILTO" | grep '^cc=' | sed 's/^cc=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- BCC=$(/bin/echo -e $(echo "$MAILTO" | grep '^bcc=' | sed 's/^bcc=//;s/%\(..\)/\\x\1/g' | awk '{ printf "%s,",$0 }'))
+- SUBJECT=$(echo "$MAILTO" | grep '^subject=' | tail -n 1)
+- BODY=$(echo "$MAILTO" | grep '^body=' | tail -n 1)
+-
+- if [ -z "$TO" ] ; then
+- NEWMAILTO=
+- else
+- NEWMAILTO="to='$TO'"
+- fi
+- if [ -n "$CC" ] ; then
+- NEWMAILTO="${NEWMAILTO},cc='$CC'"
+- fi
+- if [ -n "$BCC" ] ; then
+- NEWMAILTO="${NEWMAILTO},bcc='$BCC'"
+- fi
+- if [ -n "$SUBJECT" ] ; then
+- NEWMAILTO="${NEWMAILTO},$SUBJECT"
+- fi
+- if [ -n "$BODY" ] ; then
+- NEWMAILTO="${NEWMAILTO},$BODY"
+- fi
+-
+- NEWMAILTO=$(echo "$NEWMAILTO" | sed 's/^,//')
+- DEBUG 1 "Running $THUNDERBIRD -compose \"$NEWMAILTO\""
+- "$THUNDERBIRD" -compose "$NEWMAILTO"
+- if [ $? -eq 0 ]; then
+- exit_success
+- else
+- exit_failure_operation_failed
+- fi
+-}
+-
+ open_kde()
+ {
+- if [ -n "$KDE_SESSION_VERSION" ] && [ "$KDE_SESSION_VERSION" -ge 5 ]; then
+- local kreadconfig=kreadconfig$KDE_SESSION_VERSION
+- else
+- local kreadconfig=kreadconfig
+- fi
+-
+- if which $kreadconfig >/dev/null 2>&1; then
+- local profile=$($kreadconfig --file emaildefaults \
+- --group Defaults --key Profile)
+- if [ -n "$profile" ]; then
+- local client=$($kreadconfig --file emaildefaults \
+- --group "PROFILE_$profile" \
+- --key EmailClient \
+- | cut -d ' ' -f 1)
+-
+- if echo "$client" | grep -Eq 'thunderbird|icedove'; then
+- run_thunderbird "$client" "$1"
+- fi
+- fi
+- fi
+-
+ local command
+ case "$KDE_SESSION_VERSION" in
+ '') command=kmailservice ;;
+@@ -130,15 +62,6 @@ open_kde()
+
+ open_gnome3()
+ {
+- local client
+- local desktop
+- desktop=`xdg-mime query default "x-scheme-handler/mailto"`
+- client=`desktop_file_to_binary "$desktop"`
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ if gio help open 2>/dev/null 1>&2; then
+ DEBUG 1 "Running gio open \"$1\""
+ gio open "$1"
+@@ -159,13 +82,6 @@ open_gnome3()
+
+ open_gnome()
+ {
+- local client
+- client=`gconftool-2 --get /desktop/gnome/url-handlers/mailto/command | cut -d ' ' -f 1` || ""
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ if gio help open 2>/dev/null 1>&2; then
+ DEBUG 1 "Running gio open \"$1\""
+ gio open "$1"
+@@ -231,15 +147,6 @@ open_flatpak()
+
+ open_generic()
+ {
+- local client
+- local desktop
+- desktop=`xdg-mime query default "x-scheme-handler/mailto"`
+- client=`desktop_file_to_binary "$desktop"`
+- echo $client | grep -E 'thunderbird|icedove' > /dev/null 2>&1
+- if [ $? -eq 0 ] ; then
+- run_thunderbird "$client" "$1"
+- fi
+-
+ xdg-open "$1"
+ local ret=$?
+
+@@ -364,21 +271,6 @@ while [ $# -gt 0 ] ; do
+ shift
+ ;;
+
+- --attach)
+- if [ -z "$1" ] ; then
+- exit_failure_syntax "file argument missing for --attach option"
+- fi
+- check_input_file "$1"
+- file=`readlink -f "$1"` # Normalize path
+- if [ -z "$file" ] || [ ! -f "$file" ] ; then
+- exit_failure_file_missing "file '$1' does not exist"
+- fi
+-
+- url_encode "$file"
+- options="${options}attach=${result}&"
+- shift
+- ;;
+-
+ -*)
+ exit_failure_syntax "unexpected option '$parm'"
+ ;;
+--
+2.25.1
+
diff --git a/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb b/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
index 41b74b8598..f6989430f5 100644
--- a/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
+++ b/meta/recipes-extended/xdg-utils/xdg-utils_1.1.3.bb
@@ -21,6 +21,7 @@ SRC_URI = "https://portland.freedesktop.org/download/${BPN}-${PV}.tar.gz \
file://0001-Reinstate-xdg-terminal.patch \
file://0001-Don-t-build-the-in-script-manual.patch \
file://1f199813e0eb0246f63b54e9e154970e609575af.patch \
+ file://CVE-2022-4055.patch \
"
SRC_URI[md5sum] = "902042508b626027a3709d105f0b63ff"
diff --git a/meta/recipes-gnome/epiphany/epiphany_3.34.4.bb b/meta/recipes-gnome/epiphany/epiphany_3.34.4.bb
index e2afb29c12..f43bfd6a67 100644
--- a/meta/recipes-gnome/epiphany/epiphany_3.34.4.bb
+++ b/meta/recipes-gnome/epiphany/epiphany_3.34.4.bb
@@ -16,6 +16,7 @@ REQUIRED_DISTRO_FEATURES = "x11 opengl"
SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive \
file://0002-help-meson.build-disable-the-use-of-yelp.patch \
+ file://CVE-2022-29536.patch \
"
SRC_URI[archive.md5sum] = "a559f164bb7d6cbeceb348648076830b"
SRC_URI[archive.sha256sum] = "60e190fc07ec7e33472e60c7e633e04004f7e277a0ffc5e9cd413706881e598d"
diff --git a/meta/recipes-gnome/epiphany/files/CVE-2022-29536.patch b/meta/recipes-gnome/epiphany/files/CVE-2022-29536.patch
new file mode 100644
index 0000000000..71cfc1238a
--- /dev/null
+++ b/meta/recipes-gnome/epiphany/files/CVE-2022-29536.patch
@@ -0,0 +1,46 @@
+CVE: CVE-2022-29536
+Upstream-Status: Backport [ https://gitlab.gnome.org/GNOME/epiphany/-/commit/486da133569ebfc436c959a7419565ab102e8525 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+From 486da133569ebfc436c959a7419565ab102e8525 Mon Sep 17 00:00:00 2001
+From: Michael Catanzaro <mcatanzaro@redhat.com>
+Date: Fri, 15 Apr 2022 18:09:46 -0500
+Subject: [PATCH] Fix memory corruption in ephy_string_shorten()
+
+This fixes a regression that I introduced in 232c613472b38ff0d0d97338f366024ddb9cd228.
+
+I got my browser stuck in a crash loop today while visiting a website
+with a page title greater than ephy-embed.c's MAX_TITLE_LENGTH, the only
+condition in which ephy_string_shorten() is ever used. Turns out this
+commit is wrong: an ellipses is a multibyte character (three bytes in
+UTF-8) and so we're writing past the end of the buffer when calling
+strcat() here. Ooops.
+
+Shame it took nearly four years to notice and correct this.
+
+Part-of: <https://gitlab.gnome.org/GNOME/epiphany/-/merge_requests/1106>
+---
+ lib/ephy-string.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/lib/ephy-string.c b/lib/ephy-string.c
+index 35a148ab32..8e524d52ca 100644
+--- a/lib/ephy-string.c
++++ b/lib/ephy-string.c
+@@ -114,11 +114,10 @@ ephy_string_shorten (char *str,
+ /* create string */
+ bytes = GPOINTER_TO_UINT (g_utf8_offset_to_pointer (str, target_length - 1) - str);
+
+- /* +1 for ellipsis, +1 for trailing NUL */
+- new_str = g_new (gchar, bytes + 1 + 1);
++ new_str = g_new (gchar, bytes + strlen ("…") + 1);
+
+ strncpy (new_str, str, bytes);
+- strcat (new_str, "…");
++ strncpy (new_str + bytes, "…", strlen ("…") + 1);
+
+ g_free (str);
+
+--
+GitLab
+
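
The epiphany fix sizes the new buffer by strlen("…") because the ellipsis is three bytes in UTF-8, not one. A hedged sketch of the same sizing rule is below; it uses malloc() rather than epiphany's g_new()/g_utf8_offset_to_pointer(), and it assumes the caller passes a byte count that is at most strlen(str) and falls on a UTF-8 character boundary.

    #include <stdlib.h>
    #include <string.h>

    /* Truncate to `bytes` bytes of content and append "…", sizing the buffer
     * by the suffix's real byte length instead of assuming one byte. */
    static char *shorten_with_ellipsis(const char *str, size_t bytes)
    {
        const char *suffix = "…";                    /* 3 bytes in UTF-8 */
        size_t suffix_len = strlen(suffix);
        char *out = malloc(bytes + suffix_len + 1);  /* content + suffix + NUL */
        if (!out)
            return NULL;
        memcpy(out, str, bytes);
        memcpy(out + bytes, suffix, suffix_len + 1); /* copies the terminating NUL too */
        return out;
    }
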
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-46829.patch b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-46829.patch
new file mode 100644
index 0000000000..b29ab209ce
--- /dev/null
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf/CVE-2021-46829.patch
@@ -0,0 +1,61 @@
+From bdf3a2630c02a63803309cf0ad4b274234c814ce Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 9 Aug 2022 09:45:42 +0530
+Subject: [PATCH] CVE-2021-46829
+
+Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/gdk-pixbuf/-/commit/5398f04d772f7f8baf5265715696ed88db0f0512]
+CVE: CVE-2021-46829
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ gdk-pixbuf/io-gif-animation.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/gdk-pixbuf/io-gif-animation.c b/gdk-pixbuf/io-gif-animation.c
+index d742963..9544391 100644
+--- a/gdk-pixbuf/io-gif-animation.c
++++ b/gdk-pixbuf/io-gif-animation.c
+@@ -364,7 +364,7 @@ composite_frame (GdkPixbufGifAnim *anim, GdkPixbufFrame *frame)
+ for (i = 0; i < n_indexes; i++) {
+ guint8 index = index_buffer[i];
+ guint x, y;
+- int offset;
++ gsize offset;
+
+ if (index == frame->transparent_index)
+ continue;
+@@ -374,11 +374,13 @@ composite_frame (GdkPixbufGifAnim *anim, GdkPixbufFrame *frame)
+ if (x >= anim->width || y >= anim->height)
+ continue;
+
+- offset = y * gdk_pixbuf_get_rowstride (anim->last_frame_data) + x * 4;
+- pixels[offset + 0] = frame->color_map[index * 3 + 0];
+- pixels[offset + 1] = frame->color_map[index * 3 + 1];
+- pixels[offset + 2] = frame->color_map[index * 3 + 2];
+- pixels[offset + 3] = 255;
++ if (g_size_checked_mul (&offset, gdk_pixbuf_get_rowstride (anim->last_frame_data), y) &&
++ g_size_checked_add (&offset, offset, x * 4)) {
++ pixels[offset + 0] = frame->color_map[index * 3 + 0];
++ pixels[offset + 1] = frame->color_map[index * 3 + 1];
++ pixels[offset + 2] = frame->color_map[index * 3 + 2];
++ pixels[offset + 3] = 255;
++ }
+ }
+
+ out:
+@@ -443,8 +445,11 @@ gdk_pixbuf_gif_anim_iter_get_pixbuf (GdkPixbufAnimationIter *anim_iter)
+ x_end = MIN (anim->last_frame->x_offset + anim->last_frame->width, anim->width);
+ y_end = MIN (anim->last_frame->y_offset + anim->last_frame->height, anim->height);
+ for (y = anim->last_frame->y_offset; y < y_end; y++) {
+- guchar *line = pixels + y * gdk_pixbuf_get_rowstride (anim->last_frame_data) + anim->last_frame->x_offset * 4;
+- memset (line, 0, (x_end - anim->last_frame->x_offset) * 4);
++ gsize offset;
++ if (g_size_checked_mul (&offset, gdk_pixbuf_get_rowstride (anim->last_frame_data), y) &&
++ g_size_checked_add (&offset, offset, anim->last_frame->x_offset * 4)) {
++ memset (pixels + offset, 0, (x_end - anim->last_frame->x_offset) * 4);
++ }
+ }
+ break;
+ case GDK_PIXBUF_FRAME_REVERT:
+--
+2.25.1
+
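
The gdk-pixbuf fix computes the pixel offset with GLib's g_size_checked_mul()/g_size_checked_add() so y * rowstride + x * 4 cannot wrap and land inside the buffer. The sketch below shows the same checked-arithmetic idea with GCC/Clang builtins (an assumption: it is not portable to other compilers and is not gdk-pixbuf code).

    #include <stdbool.h>
    #include <stddef.h>

    /* Compute offset = y * rowstride + x4 with overflow detection; the caller
     * only touches the pixel when the arithmetic is exact. */
    static bool pixel_offset(size_t y, size_t rowstride, size_t x4, size_t *offset)
    {
        size_t tmp;
        if (__builtin_mul_overflow(y, rowstride, &tmp))
            return false;               /* y * rowstride wrapped */
        if (__builtin_add_overflow(tmp, x4, offset))
            return false;               /* adding x * 4 wrapped */
        return true;
    }
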
diff --git a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb
index 60a04c3581..1171e6cc11 100644
--- a/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb
+++ b/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb
@@ -26,6 +26,7 @@ SRC_URI = "${GNOME_MIRROR}/${BPN}/${MAJ_VER}/${BPN}-${PV}.tar.xz \
file://missing-test-data.patch \
file://CVE-2020-29385.patch \
file://CVE-2021-20240.patch \
+ file://CVE-2021-46829.patch \
"
SRC_URI_append_class-target = " \
diff --git a/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch b/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
index 5232cf70c6..a2dba6cb20 100644
--- a/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
+++ b/meta/recipes-graphics/cairo/cairo/CVE-2019-6461.patch
@@ -1,19 +1,20 @@
-There is a potential infinite-loop in function _arc_error_normalized().
+There is an assertion in function _cairo_arc_in_direction().
CVE: CVE-2019-6461
Upstream-Status: Pending
Signed-off-by: Ross Burton <ross.burton@intel.com>
diff --git a/src/cairo-arc.c b/src/cairo-arc.c
-index 390397bae..f9249dbeb 100644
+index 390397bae..1bde774a4 100644
--- a/src/cairo-arc.c
+++ b/src/cairo-arc.c
-@@ -99,7 +99,7 @@ _arc_max_angle_for_tolerance_normalized (double tolerance)
- do {
- angle = M_PI / i++;
- error = _arc_error_normalized (angle);
-- } while (error > tolerance);
-+ } while (error > tolerance && error > __DBL_EPSILON__);
+@@ -186,7 +186,8 @@ _cairo_arc_in_direction (cairo_t *cr,
+ if (cairo_status (cr))
+ return;
- return angle;
- }
+- assert (angle_max >= angle_min);
++ if (angle_max < angle_min)
++ return;
+
+ if (angle_max - angle_min > 2 * M_PI * MAX_FULL_CIRCLES) {
+ angle_max = fmod (angle_max - angle_min, 2 * M_PI);
diff --git a/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch b/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
index 4e4598c5b5..7c3209291b 100644
--- a/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
+++ b/meta/recipes-graphics/cairo/cairo/CVE-2019-6462.patch
@@ -1,20 +1,40 @@
-There is an assertion in function _cairo_arc_in_direction().
-
CVE: CVE-2019-6462
-Upstream-Status: Pending
-Signed-off-by: Ross Burton <ross.burton@intel.com>
+Upstream-Status: Backport
+Signed-off-by: Quentin Schulz <quentin.schulz@theobroma-systems.com>
+
+From ab2c5ee21e5f3d3ee4b3f67cfcd5811a4f99c3a0 Mon Sep 17 00:00:00 2001
+From: Heiko Lewin <hlewin@gmx.de>
+Date: Sun, 1 Aug 2021 11:16:03 +0000
+Subject: [PATCH] _arc_max_angle_for_tolerance_normalized: fix infinite loop
+
+---
+ src/cairo-arc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/cairo-arc.c b/src/cairo-arc.c
-index 390397bae..1bde774a4 100644
+index 390397bae..1c891d1a0 100644
--- a/src/cairo-arc.c
+++ b/src/cairo-arc.c
-@@ -186,7 +186,8 @@ _cairo_arc_in_direction (cairo_t *cr,
- if (cairo_status (cr))
- return;
+@@ -90,16 +90,18 @@ _arc_max_angle_for_tolerance_normalized (double tolerance)
+ { M_PI / 11.0, 9.81410988043554039085e-09 },
+ };
+ int table_size = ARRAY_LENGTH (table);
++ const int max_segments = 1000; /* this value is chosen arbitrarily. this gives an error of about 1.74909e-20 */
-- assert (angle_max >= angle_min);
-+ if (angle_max < angle_min)
-+ return;
+ for (i = 0; i < table_size; i++)
+ if (table[i].error < tolerance)
+ return table[i].angle;
- if (angle_max - angle_min > 2 * M_PI * MAX_FULL_CIRCLES) {
- angle_max = fmod (angle_max - angle_min, 2 * M_PI);
+ ++i;
++
+ do {
+ angle = M_PI / i++;
+ error = _arc_error_normalized (angle);
+- } while (error > tolerance);
++ } while (error > tolerance && i < max_segments);
+
+ return angle;
+ }
+--
+2.38.1
+
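
The CVE-2019-6462 backport bounds the angle-refinement loop with max_segments so it terminates even when the error term never drops below the requested tolerance. A standalone sketch of that guard pattern, with err() standing in for cairo's _arc_error_normalized():

    #include <math.h>

    /* Halve `angle` until err(angle) <= tolerance, but never loop more than
     * `max_iter` times, so an unreachable tolerance cannot spin forever. */
    static double refine_angle(double tolerance, double (*err)(double), int max_iter)
    {
        double angle;
        int i = 1;
        do {
            angle = M_PI / i++;
        } while (err(angle) > tolerance && i < max_iter);
        return angle;
    }
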
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2022-27404.patch b/meta/recipes-graphics/freetype/freetype/CVE-2022-27404.patch
new file mode 100644
index 0000000000..e66400ddb1
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2022-27404.patch
@@ -0,0 +1,33 @@
+From 53dfdcd8198d2b3201a23c4bad9190519ba918db Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Thu, 17 Mar 2022 19:24:16 +0100
+Subject: [PATCH] [sfnt] Avoid invalid face index.
+
+Fixes #1138.
+
+* src/sfnt/sfobjs.c (sfnt_init_face), src/sfnt/sfwoff2.c (woff2_open_font):
+Check `face_index` before decrementing.
+
+CVE: CVE-2022-27404
+Upstream-Status: Backport [https://gitlab.freedesktop.org/freetype/freetype/-/commit/53dfdcd8198d2b3201a23c4bad9190519ba918db.patch]
+Comment: Removed second hunk as sfwoff2.c file is not part of current v2.10.1 code
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+---
+ src/sfnt/sfobjs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/sfnt/sfobjs.c b/src/sfnt/sfobjs.c
+index f9d4d3858..9771c35df 100644
+--- a/src/sfnt/sfobjs.c
++++ b/src/sfnt/sfobjs.c
+@@ -566,7 +566,7 @@
+ face_index = FT_ABS( face_instance_index ) & 0xFFFF;
+
+ /* value -(N+1) requests information on index N */
+- if ( face_instance_index < 0 )
++ if ( face_instance_index < 0 && face_index > 0 )
+ face_index--;
+
+ if ( face_index >= face->ttc_header.count )
+--
+GitLab
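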
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2022-27405.patch b/meta/recipes-graphics/freetype/freetype/CVE-2022-27405.patch
new file mode 100644
index 0000000000..08fccd5a3b
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2022-27405.patch
@@ -0,0 +1,38 @@
+From 22a0cccb4d9d002f33c1ba7a4b36812c7d4f46b5 Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Sat, 19 Mar 2022 06:40:17 +0100
+Subject: [PATCH] * src/base/ftobjs.c (ft_open_face_internal): Properly guard
+ `face_index`.
+We must ensure that the cast to `FT_Int` doesn't change the sign.
+Fixes #1139.
+
+CVE: CVE-2022-27405
+Upstream-Status: Backport [https://gitlab.freedesktop.org/freetype/freetype/-/commit/22a0cccb4d9d002f33c1ba7a4b36812c7d4f46b5]
+Comment: No Change in any hunk
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+---
+ src/base/ftobjs.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/src/base/ftobjs.c b/src/base/ftobjs.c
+index 2c0f0e6c9..10952a6c6 100644
+--- a/src/base/ftobjs.c
++++ b/src/base/ftobjs.c
+@@ -2527,6 +2527,15 @@
+ #endif
+
+
++ /* only use lower 31 bits together with sign bit */
++ if ( face_index > 0 )
++ face_index &= 0x7FFFFFFFL;
++ else
++ {
++ face_index &= 0x7FFFFFFFL;
++ face_index = -face_index;
++ }
++
+ #ifdef FT_DEBUG_LEVEL_TRACE
+ FT_TRACE3(( "FT_Open_Face: " ));
+ if ( face_index < 0 )
+--
+GitLab
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2022-27406.patch b/meta/recipes-graphics/freetype/freetype/CVE-2022-27406.patch
new file mode 100644
index 0000000000..4b5e629f30
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2022-27406.patch
@@ -0,0 +1,31 @@
+From 0c2bdb01a2e1d24a3e592377a6d0822856e10df2 Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Sat, 19 Mar 2022 09:37:28 +0100
+Subject: [PATCH] * src/base/ftobjs.c (FT_Request_Size): Guard `face->size`.
+
+Fixes #1140.
+
+CVE: CVE-2022-27406
+Upstream-Status: Backport [https://gitlab.freedesktop.org/freetype/freetype/-/commit/0c2bdb01a2e1d24a3e592377a6d0822856e10df2]
+Comment: No Change in any hunk
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+---
+ src/base/ftobjs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/src/base/ftobjs.c b/src/base/ftobjs.c
+index 6492a1517..282c9121a 100644
+--- a/src/base/ftobjs.c
++++ b/src/base/ftobjs.c
+@@ -3409,6 +3409,9 @@
+ if ( !face )
+ return FT_THROW( Invalid_Face_Handle );
+
++ if ( !face->size )
++ return FT_THROW( Invalid_Size_Handle );
++
+ if ( !req || req->width < 0 || req->height < 0 ||
+ req->type >= FT_SIZE_REQUEST_TYPE_MAX )
+ return FT_THROW( Invalid_Argument );
+--
+GitLab
diff --git a/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch b/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch
new file mode 100644
index 0000000000..800d77579e
--- /dev/null
+++ b/meta/recipes-graphics/freetype/freetype/CVE-2023-2004.patch
@@ -0,0 +1,40 @@
+From e6fda039ad638866b7a6a5d046f03278ba1b7611 Mon Sep 17 00:00:00 2001
+From: Werner Lemberg <wl@gnu.org>
+Date: Mon, 14 Nov 2022 19:18:19 +0100
+Subject: [PATCH] * src/truetype/ttgxvar.c (tt_hvadvance_adjust): Integer
+ overflow.
+
+Reported as
+
+ https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=50462
+
+Upstream-Status: Backport [https://github.com/freetype/freetype/commit/e6fda039ad638866b7a6a5d046f03278ba1b7611]
+CVE: CVE-2023-2004
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/truetype/ttgxvar.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/truetype/ttgxvar.c b/src/truetype/ttgxvar.c
+index 78d87dc..258d701 100644
+--- a/src/truetype/ttgxvar.c
++++ b/src/truetype/ttgxvar.c
+@@ -43,6 +43,7 @@
+ #include FT_INTERNAL_DEBUG_H
+ #include FT_CONFIG_CONFIG_H
+ #include FT_INTERNAL_STREAM_H
++#include <freetype/internal/ftcalc.h>
+ #include FT_INTERNAL_SFNT_H
+ #include FT_TRUETYPE_TAGS_H
+ #include FT_TRUETYPE_IDS_H
+@@ -1065,7 +1066,7 @@
+ delta == 1 ? "" : "s",
+ vertical ? "VVAR" : "HVAR" ));
+
+- *avalue += delta;
++ *avalue = ADD_INT( *avalue, delta );
+
+ Exit:
+ return error;
+--
+2.17.1
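
The CVE-2023-2004 fix routes the advance adjustment through FreeType's ADD_INT() so the addition is performed without signed-overflow undefined behaviour. One common way to get the same safety is a saturating add, sketched below; this is an illustration of the idea, not FreeType's actual macro.

    #include <limits.h>

    /* Add two ints, clamping to INT_MAX/INT_MIN instead of overflowing. */
    static int add_int_saturated(int a, int b)
    {
        if (b > 0 && a > INT_MAX - b)
            return INT_MAX;              /* would overflow upward */
        if (b < 0 && a < INT_MIN - b)
            return INT_MIN;              /* would overflow downward */
        return a + b;
    }
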
diff --git a/meta/recipes-graphics/freetype/freetype_2.10.1.bb b/meta/recipes-graphics/freetype/freetype_2.10.1.bb
index 2d444bbf19..6af744b981 100644
--- a/meta/recipes-graphics/freetype/freetype_2.10.1.bb
+++ b/meta/recipes-graphics/freetype/freetype_2.10.1.bb
@@ -15,6 +15,10 @@ LIC_FILES_CHKSUM = "file://docs/LICENSE.TXT;md5=4af6221506f202774ef74f64932878a1
SRC_URI = "${SAVANNAH_NONGNU_MIRROR}/${BPN}/${BP}.tar.xz \
file://use-right-libtool.patch \
file://0001-sfnt-Fix-heap-buffer-overflow-59308.patch \
+ file://CVE-2022-27404.patch \
+ file://CVE-2022-27405.patch \
+ file://CVE-2022-27406.patch \
+ file://CVE-2023-2004.patch \
"
SRC_URI[md5sum] = "bd42e75127f8431923679480efb5ba8f"
SRC_URI[sha256sum] = "16dbfa488a21fe827dc27eaf708f42f7aa3bb997d745d31a19781628c36ba26f"
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre0.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre0.patch
new file mode 100644
index 0000000000..90d4cfefb4
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre0.patch
@@ -0,0 +1,335 @@
+From 3122c2cdc45a964efedad8953a2df67205c3e3a8 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Sat, 4 Dec 2021 19:50:33 -0800
+Subject: [PATCH] [buffer] Add HB_GLYPH_FLAG_UNSAFE_TO_CONCAT
+
+Fixes https://github.com/harfbuzz/harfbuzz/issues/1463
+Upstream-Status: Backport from [https://github.com/harfbuzz/harfbuzz/commit/3122c2cdc45a964efedad8953a2df67205c3e3a8]
+Comment1: To backport the fix for CVE-2023-25193, add the definition of HB_GLYPH_FLAG_UNSAFE_TO_CONCAT. This patch is needed along with CVE-2023-25193-pre1.patch for successful porting.
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/hb-buffer.cc | 10 ++---
+ src/hb-buffer.h | 76 ++++++++++++++++++++++++++++++------
+ src/hb-buffer.hh | 33 ++++++++++------
+ src/hb-ot-layout-gsubgpos.hh | 39 +++++++++++++++---
+ src/hb-ot-shape.cc | 8 +---
+ 5 files changed, 124 insertions(+), 42 deletions(-)
+
+diff --git a/src/hb-buffer.cc b/src/hb-buffer.cc
+index 6131c86..bba5eae 100644
+--- a/src/hb-buffer.cc
++++ b/src/hb-buffer.cc
+@@ -610,14 +610,14 @@ done:
+ }
+
+ void
+-hb_buffer_t::unsafe_to_break_impl (unsigned int start, unsigned int end)
++hb_buffer_t::unsafe_to_break_impl (unsigned int start, unsigned int end, hb_mask_t mask)
+ {
+ unsigned int cluster = (unsigned int) -1;
+ cluster = _unsafe_to_break_find_min_cluster (info, start, end, cluster);
+- _unsafe_to_break_set_mask (info, start, end, cluster);
++ _unsafe_to_break_set_mask (info, start, end, cluster, mask);
+ }
+ void
+-hb_buffer_t::unsafe_to_break_from_outbuffer (unsigned int start, unsigned int end)
++hb_buffer_t::unsafe_to_break_from_outbuffer (unsigned int start, unsigned int end, hb_mask_t mask)
+ {
+ if (!have_output)
+ {
+@@ -631,8 +631,8 @@ hb_buffer_t::unsafe_to_break_from_outbuffer (unsigned int start, unsigned int en
+ unsigned int cluster = (unsigned int) -1;
+ cluster = _unsafe_to_break_find_min_cluster (out_info, start, out_len, cluster);
+ cluster = _unsafe_to_break_find_min_cluster (info, idx, end, cluster);
+- _unsafe_to_break_set_mask (out_info, start, out_len, cluster);
+- _unsafe_to_break_set_mask (info, idx, end, cluster);
++ _unsafe_to_break_set_mask (out_info, start, out_len, cluster, mask);
++ _unsafe_to_break_set_mask (info, idx, end, cluster, mask);
+ }
+
+ void
+diff --git a/src/hb-buffer.h b/src/hb-buffer.h
+index d5cb746..42dc92a 100644
+--- a/src/hb-buffer.h
++++ b/src/hb-buffer.h
+@@ -77,26 +77,76 @@ typedef struct hb_glyph_info_t
+ * @HB_GLYPH_FLAG_UNSAFE_TO_BREAK: Indicates that if input text is broken at the
+ * beginning of the cluster this glyph is part of,
+ * then both sides need to be re-shaped, as the
+- * result might be different. On the flip side,
+- * it means that when this flag is not present,
+- * then it's safe to break the glyph-run at the
+- * beginning of this cluster, and the two sides
+- * represent the exact same result one would get
+- * if breaking input text at the beginning of
+- * this cluster and shaping the two sides
+- * separately. This can be used to optimize
+- * paragraph layout, by avoiding re-shaping
+- * of each line after line-breaking, or limiting
+- * the reshaping to a small piece around the
+- * breaking point only.
++ * result might be different.
++ *
++ * On the flip side, it means that when this
++ * flag is not present, then it is safe to break
++ * the glyph-run at the beginning of this
++ * cluster, and the two sides will represent the
++ * exact same result one would get if breaking
++ * input text at the beginning of this cluster
++ * and shaping the two sides separately.
++ *
++ * This can be used to optimize paragraph
++ * layout, by avoiding re-shaping of each line
++ * after line-breaking.
++ *
++ * @HB_GLYPH_FLAG_UNSAFE_TO_CONCAT: Indicates that if input text is changed on one
++ * side of the beginning of the cluster this glyph
++ * is part of, then the shaping results for the
++ * other side might change.
++ *
++ * Note that the absence of this flag will NOT by
++ * itself mean that it IS safe to concat text.
++ * Only two pieces of text both of which clear of
++ * this flag can be concatenated safely.
++ *
++ * This can be used to optimize paragraph
++ * layout, by avoiding re-shaping of each line
++ * after line-breaking, by limiting the
++ * reshaping to a small piece around the
++ * breaking positin only, even if the breaking
++ * position carries the
++ * #HB_GLYPH_FLAG_UNSAFE_TO_BREAK or when
++ * hyphenation or other text transformation
++ * happens at line-break position, in the following
++ * way:
++ *
++ * 1. Iterate back from the line-break position till
++ * the the first cluster start position that is
++ * NOT unsafe-to-concat, 2. shape the segment from
++ * there till the end of line, 3. check whether the
++ * resulting glyph-run also is clear of the
++ * unsafe-to-concat at its start-of-text position;
++ * if it is, just splice it into place and the line
++ * is shaped; If not, move on to a position further
++ * back that is clear of unsafe-to-concat and retry
++ * from there, and repeat.
++ *
++ * At the start of next line a similar algorithm can
++ * be implemented. A slight complication will arise,
++ * because while our buffer API has a way to
++ * return flags for position corresponding to
++ * start-of-text, there is currently no position
++ * corresponding to end-of-text. This limitation
++ * can be alleviated by shaping more text than needed
++ * and looking for unsafe-to-concat flag within text
++ * clusters.
++ *
++ * The #HB_GLYPH_FLAG_UNSAFE_TO_BREAK flag will
++ * always imply this flag.
++ *
++ * Since: REPLACEME
++ *
+ * @HB_GLYPH_FLAG_DEFINED: All the currently defined flags.
+ *
+ * Since: 1.5.0
+ */
+ typedef enum { /*< flags >*/
+ HB_GLYPH_FLAG_UNSAFE_TO_BREAK = 0x00000001,
++ HB_GLYPH_FLAG_UNSAFE_TO_CONCAT = 0x00000002,
+
+- HB_GLYPH_FLAG_DEFINED = 0x00000001 /* OR of all defined flags */
++ HB_GLYPH_FLAG_DEFINED = 0x00000003 /* OR of all defined flags */
+ } hb_glyph_flags_t;
+
+ HB_EXTERN hb_glyph_flags_t
+diff --git a/src/hb-buffer.hh b/src/hb-buffer.hh
+index b5596d9..beac7b6 100644
+--- a/src/hb-buffer.hh
++++ b/src/hb-buffer.hh
+@@ -67,8 +67,8 @@ enum hb_buffer_scratch_flags_t {
+ HB_BUFFER_SCRATCH_FLAG_HAS_DEFAULT_IGNORABLES = 0x00000002u,
+ HB_BUFFER_SCRATCH_FLAG_HAS_SPACE_FALLBACK = 0x00000004u,
+ HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT = 0x00000008u,
+- HB_BUFFER_SCRATCH_FLAG_HAS_UNSAFE_TO_BREAK = 0x00000010u,
+- HB_BUFFER_SCRATCH_FLAG_HAS_CGJ = 0x00000020u,
++ HB_BUFFER_SCRATCH_FLAG_HAS_CGJ = 0x00000010u,
++ HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS = 0x00000020u,
+
+ /* Reserved for complex shapers' internal use. */
+ HB_BUFFER_SCRATCH_FLAG_COMPLEX0 = 0x01000000u,
+@@ -324,8 +324,19 @@ struct hb_buffer_t
+ return;
+ unsafe_to_break_impl (start, end);
+ }
+- HB_INTERNAL void unsafe_to_break_impl (unsigned int start, unsigned int end);
+- HB_INTERNAL void unsafe_to_break_from_outbuffer (unsigned int start, unsigned int end);
++ void unsafe_to_concat (unsigned int start,
++ unsigned int end)
++ {
++ if (end - start < 2)
++ return;
++ unsafe_to_break_impl (start, end, HB_GLYPH_FLAG_UNSAFE_TO_CONCAT);
++ }
++ HB_INTERNAL void unsafe_to_break_impl (unsigned int start, unsigned int end,
++ hb_mask_t mask = HB_GLYPH_FLAG_UNSAFE_TO_BREAK | HB_GLYPH_FLAG_UNSAFE_TO_CONCAT);
++ HB_INTERNAL void unsafe_to_break_from_outbuffer (unsigned int start, unsigned int end,
++ hb_mask_t mask = HB_GLYPH_FLAG_UNSAFE_TO_BREAK | HB_GLYPH_FLAG_UNSAFE_TO_CONCAT);
++ void unsafe_to_concat_from_outbuffer (unsigned int start, unsigned int end)
++ { unsafe_to_break_from_outbuffer (start, end, HB_GLYPH_FLAG_UNSAFE_TO_CONCAT); }
+
+
+ /* Internal methods */
+@@ -377,12 +388,7 @@ struct hb_buffer_t
+ set_cluster (hb_glyph_info_t &inf, unsigned int cluster, unsigned int mask = 0)
+ {
+ if (inf.cluster != cluster)
+- {
+- if (mask & HB_GLYPH_FLAG_UNSAFE_TO_BREAK)
+- inf.mask |= HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
+- else
+- inf.mask &= ~HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
+- }
++ inf.mask = (inf.mask & ~HB_GLYPH_FLAG_DEFINED) | (mask & HB_GLYPH_FLAG_DEFINED);
+ inf.cluster = cluster;
+ }
+
+@@ -398,13 +404,14 @@ struct hb_buffer_t
+ void
+ _unsafe_to_break_set_mask (hb_glyph_info_t *infos,
+ unsigned int start, unsigned int end,
+- unsigned int cluster)
++ unsigned int cluster,
++ hb_mask_t mask)
+ {
+ for (unsigned int i = start; i < end; i++)
+ if (cluster != infos[i].cluster)
+ {
+- scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_UNSAFE_TO_BREAK;
+- infos[i].mask |= HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
++ scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS;
++ infos[i].mask |= mask;
+ }
+ }
+
+diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
+index 579d178..a6ca456 100644
+--- a/src/hb-ot-layout-gsubgpos.hh
++++ b/src/hb-ot-layout-gsubgpos.hh
+@@ -369,7 +369,7 @@ struct hb_ot_apply_context_t :
+ may_skip (const hb_glyph_info_t &info) const
+ { return matcher.may_skip (c, info); }
+
+- bool next ()
++ bool next (unsigned *unsafe_to = nullptr)
+ {
+ assert (num_items > 0);
+ while (idx + num_items < end)
+@@ -392,11 +392,17 @@ struct hb_ot_apply_context_t :
+ }
+
+ if (skip == matcher_t::SKIP_NO)
++ {
++ if (unsafe_to)
++ *unsafe_to = idx + 1;
+ return false;
++ }
+ }
++ if (unsafe_to)
++ *unsafe_to = end;
+ return false;
+ }
+- bool prev ()
++ bool prev (unsigned *unsafe_from = nullptr)
+ {
+ assert (num_items > 0);
+ while (idx > num_items - 1)
+@@ -419,8 +425,14 @@ struct hb_ot_apply_context_t :
+ }
+
+ if (skip == matcher_t::SKIP_NO)
++ {
++ if (unsafe_from)
++ *unsafe_from = hb_max (1u, idx) - 1u;
+ return false;
++ }
+ }
++ if (unsafe_from)
++ *unsafe_from = 0;
+ return false;
+ }
+
+@@ -834,7 +846,12 @@ static inline bool match_input (hb_ot_apply_context_t *c,
+ match_positions[0] = buffer->idx;
+ for (unsigned int i = 1; i < count; i++)
+ {
+- if (!skippy_iter.next ()) return_trace (false);
++ unsigned unsafe_to;
++ if (!skippy_iter.next (&unsafe_to))
++ {
++ c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
++ return_trace (false);
++ }
+
+ match_positions[i] = skippy_iter.idx;
+
+@@ -1022,8 +1039,14 @@ static inline bool match_backtrack (hb_ot_apply_context_t *c,
+ skippy_iter.set_match_func (match_func, match_data, backtrack);
+
+ for (unsigned int i = 0; i < count; i++)
+- if (!skippy_iter.prev ())
++ {
++ unsigned unsafe_from;
++ if (!skippy_iter.prev (&unsafe_from))
++ {
++ c->buffer->unsafe_to_concat_from_outbuffer (unsafe_from, c->buffer->idx);
+ return_trace (false);
++ }
++ }
+
+ *match_start = skippy_iter.idx;
+
+@@ -1045,8 +1068,14 @@ static inline bool match_lookahead (hb_ot_apply_context_t *c,
+ skippy_iter.set_match_func (match_func, match_data, lookahead);
+
+ for (unsigned int i = 0; i < count; i++)
+- if (!skippy_iter.next ())
++ {
++ unsigned unsafe_to;
++ if (!skippy_iter.next (&unsafe_to))
++ {
++ c->buffer->unsafe_to_concat (c->buffer->idx + offset, unsafe_to);
+ return_trace (false);
++ }
++ }
+
+ *end_index = skippy_iter.idx + 1;
+
+diff --git a/src/hb-ot-shape.cc b/src/hb-ot-shape.cc
+index 5d9a70c..5d10b30 100644
+--- a/src/hb-ot-shape.cc
++++ b/src/hb-ot-shape.cc
+@@ -1008,7 +1008,7 @@ hb_propagate_flags (hb_buffer_t *buffer)
+ /* Propagate cluster-level glyph flags to be the same on all cluster glyphs.
+ * Simplifies using them. */
+
+- if (!(buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_UNSAFE_TO_BREAK))
++ if (!(buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS))
+ return;
+
+ hb_glyph_info_t *info = buffer->info;
+@@ -1017,11 +1017,7 @@ hb_propagate_flags (hb_buffer_t *buffer)
+ {
+ unsigned int mask = 0;
+ for (unsigned int i = start; i < end; i++)
+- if (info[i].mask & HB_GLYPH_FLAG_UNSAFE_TO_BREAK)
+- {
+- mask = HB_GLYPH_FLAG_UNSAFE_TO_BREAK;
+- break;
+- }
++ mask |= info[i].mask & HB_GLYPH_FLAG_DEFINED;
+ if (mask)
+ for (unsigned int i = start; i < end; i++)
+ info[i].mask |= mask;
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch
new file mode 100644
index 0000000000..4994e0ef68
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193-pre1.patch
@@ -0,0 +1,135 @@
+From b29fbd16fa82b82bdf0dcb2f13a63f7dc23cf324 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Mon, 6 Feb 2023 13:08:52 -0700
+Subject: [PATCH] [gsubgpos] Refactor skippy_iter.match()
+
+Upstream-Status: Backport from [https://github.com/harfbuzz/harfbuzz/commit/b29fbd16fa82b82bdf0dcb2f13a63f7dc23cf324]
+Comment1: To backport the fix for CVE-2023-25193, add the definitions of MATCH, NOT_MATCH and SKIP.
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ src/hb-ot-layout-gsubgpos.hh | 94 +++++++++++++++++++++---------------
+ 1 file changed, 54 insertions(+), 40 deletions(-)
+
+diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
+index a6ca456..5a7e564 100644
+--- a/src/hb-ot-layout-gsubgpos.hh
++++ b/src/hb-ot-layout-gsubgpos.hh
+@@ -369,33 +369,52 @@ struct hb_ot_apply_context_t :
+ may_skip (const hb_glyph_info_t &info) const
+ { return matcher.may_skip (c, info); }
+
++ enum match_t {
++ MATCH,
++ NOT_MATCH,
++ SKIP
++ };
++
++ match_t match (hb_glyph_info_t &info)
++ {
++ matcher_t::may_skip_t skip = matcher.may_skip (c, info);
++ if (unlikely (skip == matcher_t::SKIP_YES))
++ return SKIP;
++
++ matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
++ if (match == matcher_t::MATCH_YES ||
++ (match == matcher_t::MATCH_MAYBE &&
++ skip == matcher_t::SKIP_NO))
++ return MATCH;
++
++ if (skip == matcher_t::SKIP_NO)
++ return NOT_MATCH;
++
++ return SKIP;
++ }
++
+ bool next (unsigned *unsafe_to = nullptr)
+ {
+ assert (num_items > 0);
+ while (idx + num_items < end)
+ {
+ idx++;
+- const hb_glyph_info_t &info = c->buffer->info[idx];
+-
+- matcher_t::may_skip_t skip = matcher.may_skip (c, info);
+- if (unlikely (skip == matcher_t::SKIP_YES))
+- continue;
+-
+- matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
+- if (match == matcher_t::MATCH_YES ||
+- (match == matcher_t::MATCH_MAYBE &&
+- skip == matcher_t::SKIP_NO))
+- {
+- num_items--;
+- if (match_glyph_data) match_glyph_data++;
+- return true;
+- }
+-
+- if (skip == matcher_t::SKIP_NO)
++ switch (match (c->buffer->info[idx]))
+ {
+- if (unsafe_to)
+- *unsafe_to = idx + 1;
+- return false;
++ case MATCH:
++ {
++ num_items--;
++ if (match_glyph_data) match_glyph_data++;
++ return true;
++ }
++ case NOT_MATCH:
++ {
++ if (unsafe_to)
++ *unsafe_to = idx + 1;
++ return false;
++ }
++ case SKIP:
++ continue;
+ }
+ }
+ if (unsafe_to)
+@@ -408,27 +427,22 @@ struct hb_ot_apply_context_t :
+ while (idx > num_items - 1)
+ {
+ idx--;
+- const hb_glyph_info_t &info = c->buffer->out_info[idx];
+-
+- matcher_t::may_skip_t skip = matcher.may_skip (c, info);
+- if (unlikely (skip == matcher_t::SKIP_YES))
+- continue;
+-
+- matcher_t::may_match_t match = matcher.may_match (info, match_glyph_data);
+- if (match == matcher_t::MATCH_YES ||
+- (match == matcher_t::MATCH_MAYBE &&
+- skip == matcher_t::SKIP_NO))
++ switch (match (c->buffer->out_info[idx]))
+ {
+- num_items--;
+- if (match_glyph_data) match_glyph_data++;
+- return true;
+- }
+-
+- if (skip == matcher_t::SKIP_NO)
+- {
+- if (unsafe_from)
+- *unsafe_from = hb_max (1u, idx) - 1u;
+- return false;
++ case MATCH:
++ {
++ num_items--;
++ if (match_glyph_data) match_glyph_data++;
++ return true;
++ }
++ case NOT_MATCH:
++ {
++ if (unsafe_from)
++ *unsafe_from = hb_max (1u, idx) - 1u;
++ return false;
++ }
++ case SKIP:
++ continue;
+ }
+ }
+ if (unsafe_from)
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch
new file mode 100644
index 0000000000..e4ac13dbad
--- /dev/null
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz/CVE-2023-25193.patch
@@ -0,0 +1,179 @@
+From 9c8e972dbecda93546038d24444d8216397d75a3 Mon Sep 17 00:00:00 2001
+From: Behdad Esfahbod <behdad@behdad.org>
+Date: Mon, 6 Feb 2023 14:51:25 -0700
+Subject: [PATCH] [GPOS] Avoid O(n^2) behavior in mark-attachment
+
+Upstream-Status: Backport from [https://github.com/harfbuzz/harfbuzz/commit/8708b9e081192786c027bb7f5f23d76dbe5c19e8]
+Comment1: The original patch [https://github.com/harfbuzz/harfbuzz/commit/85be877925ddbf34f74a1229f3ca1716bb6170dc] caused a regression and was reverted; this patch completes the fix.
+Comment2: The Patch contained files MarkBasePosFormat1.hh and MarkLigPosFormat1.hh which were moved from hb-ot-layout-gpos-table.hh as per https://github.com/harfbuzz/harfbuzz/commit/197d9a5c994eb41c8c89b7b958b26b1eacfeeb00
+CVE: CVE-2023-25193
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+Signed-off-by: Dhairya Nagodra <dnagodra@cisco.com>
+
+---
+ src/hb-ot-layout-gpos-table.hh | 103 +++++++++++++++++++++++----------
+ src/hb-ot-layout-gsubgpos.hh | 5 +-
+ 2 files changed, 78 insertions(+), 30 deletions(-)
+
+diff --git a/src/hb-ot-layout-gpos-table.hh b/src/hb-ot-layout-gpos-table.hh
+index 024312d..db5f9ae 100644
+--- a/src/hb-ot-layout-gpos-table.hh
++++ b/src/hb-ot-layout-gpos-table.hh
+@@ -1458,6 +1458,25 @@ struct MarkBasePosFormat1
+
+ const Coverage &get_coverage () const { return this+markCoverage; }
+
++ static inline bool accept (hb_buffer_t *buffer, unsigned idx)
++ {
++ /* We only want to attach to the first of a MultipleSubst sequence.
++ * https://github.com/harfbuzz/harfbuzz/issues/740
++ * Reject others...
++ * ...but stop if we find a mark in the MultipleSubst sequence:
++ * https://github.com/harfbuzz/harfbuzz/issues/1020 */
++ return !_hb_glyph_info_multiplied (&buffer->info[idx]) ||
++ 0 == _hb_glyph_info_get_lig_comp (&buffer->info[idx]) ||
++ (idx == 0 ||
++ _hb_glyph_info_is_mark (&buffer->info[idx - 1]) ||
++ !_hb_glyph_info_multiplied (&buffer->info[idx - 1]) ||
++ _hb_glyph_info_get_lig_id (&buffer->info[idx]) !=
++ _hb_glyph_info_get_lig_id (&buffer->info[idx - 1]) ||
++ _hb_glyph_info_get_lig_comp (&buffer->info[idx]) !=
++ _hb_glyph_info_get_lig_comp (&buffer->info[idx - 1]) + 1
++ );
++ }
++
+ bool apply (hb_ot_apply_context_t *c) const
+ {
+ TRACE_APPLY (this);
+@@ -1465,37 +1484,46 @@ struct MarkBasePosFormat1
+ unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint);
+ if (likely (mark_index == NOT_COVERED)) return_trace (false);
+
+- /* Now we search backwards for a non-mark glyph */
++ /* Now we search backwards for a non-mark glyph.
++ * We don't use skippy_iter.prev() to avoid O(n^2) behavior. */
++
+ hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
+- skippy_iter.reset (buffer->idx, 1);
+ skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
+- do {
+- if (!skippy_iter.prev ()) return_trace (false);
+- /* We only want to attach to the first of a MultipleSubst sequence.
+- * https://github.com/harfbuzz/harfbuzz/issues/740
+- * Reject others...
+- * ...but stop if we find a mark in the MultipleSubst sequence:
+- * https://github.com/harfbuzz/harfbuzz/issues/1020 */
+- if (!_hb_glyph_info_multiplied (&buffer->info[skippy_iter.idx]) ||
+- 0 == _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) ||
+- (skippy_iter.idx == 0 ||
+- _hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx - 1]) ||
+- _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx]) !=
+- _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx - 1]) ||
+- _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) !=
+- _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx - 1]) + 1
+- ))
+- break;
+- skippy_iter.reject ();
+- } while (true);
++ unsigned j;
++ for (j = buffer->idx; j > c->last_base_until; j--)
++ {
++ auto match = skippy_iter.match (buffer->info[j - 1]);
++ if (match == skippy_iter.MATCH)
++ {
++ if (!accept (buffer, j - 1))
++ match = skippy_iter.SKIP;
++ }
++ if (match == skippy_iter.MATCH)
++ {
++ c->last_base = (signed) j - 1;
++ break;
++ }
++ }
++ c->last_base_until = buffer->idx;
++ if (c->last_base == -1)
++ {
++ buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
++ return_trace (false);
++ }
++
++ unsigned idx = (unsigned) c->last_base;
+
+ /* Checking that matched glyph is actually a base glyph by GDEF is too strong; disabled */
+- //if (!_hb_glyph_info_is_base_glyph (&buffer->info[skippy_iter.idx])) { return_trace (false); }
++ //if (!_hb_glyph_info_is_base_glyph (&buffer->info[idx])) { return_trace (false); }
+
+- unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[skippy_iter.idx].codepoint);
+- if (base_index == NOT_COVERED) return_trace (false);
++ unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[idx].codepoint);
++ if (base_index == NOT_COVERED)
++ {
++ buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
++ return_trace (false);
++ }
+
+- return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, skippy_iter.idx));
++ return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, idx));
+ }
+
+ bool subset (hb_subset_context_t *c) const
+@@ -1587,15 +1615,32 @@ struct MarkLigPosFormat1
+ if (likely (mark_index == NOT_COVERED)) return_trace (false);
+
+ /* Now we search backwards for a non-mark glyph */
++
+ hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
+- skippy_iter.reset (buffer->idx, 1);
+ skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
+- if (!skippy_iter.prev ()) return_trace (false);
++
++ unsigned j;
++ for (j = buffer->idx; j > c->last_base_until; j--)
++ {
++ auto match = skippy_iter.match (buffer->info[j - 1]);
++ if (match == skippy_iter.MATCH)
++ {
++ c->last_base = (signed) j - 1;
++ break;
++ }
++ }
++ c->last_base_until = buffer->idx;
++ if (c->last_base == -1)
++ {
++ buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
++ return_trace (false);
++ }
++
++ j = (unsigned) c->last_base;
+
+ /* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */
+- //if (!_hb_glyph_info_is_ligature (&buffer->info[skippy_iter.idx])) { return_trace (false); }
++ //if (!_hb_glyph_info_is_ligature (&buffer->info[idx])) { return_trace (false); }
+
+- unsigned int j = skippy_iter.idx;
+ unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[j].codepoint);
+ if (lig_index == NOT_COVERED) return_trace (false);
+
+diff --git a/src/hb-ot-layout-gsubgpos.hh b/src/hb-ot-layout-gsubgpos.hh
+index 5a7e564..437123c 100644
+--- a/src/hb-ot-layout-gsubgpos.hh
++++ b/src/hb-ot-layout-gsubgpos.hh
+@@ -503,6 +503,9 @@ struct hb_ot_apply_context_t :
+ uint32_t random_state;
+
+
++ signed last_base = -1; // GPOS uses
++ unsigned last_base_until = 0; // GPOS uses
++
+ hb_ot_apply_context_t (unsigned int table_index_,
+ hb_font_t *font_,
+ hb_buffer_t *buffer_) :
+@@ -536,7 +539,7 @@ struct hb_ot_apply_context_t :
+ iter_context.init (this, true);
+ }
+
+- void set_lookup_mask (hb_mask_t mask) { lookup_mask = mask; init_iters (); }
++ void set_lookup_mask (hb_mask_t mask) { lookup_mask = mask; last_base = -1; last_base_until = 0; init_iters (); }
+ void set_auto_zwj (bool auto_zwj_) { auto_zwj = auto_zwj_; init_iters (); }
+ void set_auto_zwnj (bool auto_zwnj_) { auto_zwnj = auto_zwnj_; init_iters (); }
+ void set_random (bool random_) { random = random_; }
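
The mark-attachment change above avoids O(n^2) behaviour by remembering the last matched base (last_base, valid up to last_base_until) so each mark only scans the part of the buffer not covered by a previous search. A minimal sketch of that memoized backward search on a flat array follows; is_mark[] stands in for HarfBuzz's glyph classification and the struct name is illustrative. A caller would start with struct base_cache c = { -1, 0 }; and reuse c across consecutive marks.

    #include <stddef.h>

    struct base_cache { long last_base; size_t valid_until; };

    /* Find the nearest non-mark element strictly before pos, caching the
     * result so a long run of marks costs O(n) overall instead of O(n^2). */
    static long find_prev_base(const unsigned char *is_mark, size_t pos,
                               struct base_cache *c)
    {
        size_t j;
        for (j = pos; j > c->valid_until; j--)   /* only scan the unseen suffix */
            if (!is_mark[j - 1]) {
                c->last_base = (long)(j - 1);
                break;
            }
        c->valid_until = pos;                    /* everything before pos is now covered */
        return c->last_base;                     /* -1 if no base has been seen yet */
    }
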
diff --git a/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb b/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb
index ee08c12bee..0cfe01f1e5 100644
--- a/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb
+++ b/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.4.bb
@@ -7,7 +7,10 @@ LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://COPYING;md5=e11f5c3149cdec4bb309babb020b32b9 \
file://src/hb-ucd.cc;beginline=1;endline=15;md5=29d4dcb6410429195df67efe3382d8bc"
-SRC_URI = "http://www.freedesktop.org/software/harfbuzz/release/${BP}.tar.xz"
+SRC_URI = "http://www.freedesktop.org/software/harfbuzz/release/${BP}.tar.xz \
+ file://CVE-2023-25193-pre0.patch \
+ file://CVE-2023-25193-pre1.patch \
+ file://CVE-2023-25193.patch"
SRC_URI[md5sum] = "2b3a4dfdb3e5e50055f941978944da9f"
SRC_URI[sha256sum] = "9413b8d96132d699687ef914ebb8c50440efc87b3f775d25856d7ec347c03c12"
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2020-35538-1.patch b/meta/recipes-graphics/jpeg/files/CVE-2020-35538-1.patch
new file mode 100644
index 0000000000..8a52ed01e9
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2020-35538-1.patch
@@ -0,0 +1,457 @@
+From 9120a247436e84c0b4eea828cb11e8f665fcde30 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Thu, 23 Jul 2020 21:24:38 -0500
+Subject: [PATCH] Fix jpeg_skip_scanlines() segfault w/merged upsamp
+
+The additional segfault mentioned in #244 was due to the fact that
+the merged upsamplers use a different private structure than the
+non-merged upsamplers. jpeg_skip_scanlines() was assuming the latter, so
+when merged upsampling was enabled, jpeg_skip_scanlines() clobbered one
+of the IDCT method pointers in the merged upsampler's private structure.
+
+For reasons unknown, the test image in #441 did not encounter this
+segfault (too small?), but it encountered an issue similar to the one
+fixed in 5bc43c7821df982f65aa1c738f67fbf7cba8bd69, whereby it was
+necessary to set up a dummy postprocessing function in
+read_and_discard_scanlines() when merged upsampling was enabled.
+Failing to do so caused either a segfault in merged_2v_upsample() (due
+to a NULL pointer being passed to jcopy_sample_rows()) or an error
+("Corrupt JPEG data: premature end of data segment"), depending on the
+number of scanlines skipped and whether the first scanline skipped was
+an odd- or even-numbered row.
+
+Fixes #441
+Fixes #244 (for real this time)
+
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/9120a247436e84c0b4eea828cb11e8f665fcde30]
+CVE: CVE-2020-35538
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ ChangeLog.md | 7 +++++
+ jdapistd.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++------
+ jdmerge.c | 46 +++++++--------------------------
+ jdmerge.h | 47 ++++++++++++++++++++++++++++++++++
+ jdmrg565.c | 10 ++++----
+ jdmrgext.c | 6 ++---
+ 6 files changed, 135 insertions(+), 53 deletions(-)
+ create mode 100644 jdmerge.h
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index 2ebfe71..19d18fa 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -54,6 +54,13 @@ a 16-bit binary PGM file into an RGB image buffer.
+ generated when using the `tjLoadImage()` function to load a 16-bit binary PPM
+ file into an extended RGB image buffer.
+
++2. Fixed segfaults or "Corrupt JPEG data: premature end of data segment" errors
++in `jpeg_skip_scanlines()` that occurred when decompressing 4:2:2 or 4:2:0 JPEG
++images using the merged (non-fancy) upsampling algorithms (that is, when
++setting `cinfo.do_fancy_upsampling` to `FALSE`.) 2.0.0[6] was a similar fix,
++but it did not cover all cases.
++
++
+ 2.0.3
+ =====
+
+diff --git a/jdapistd.c b/jdapistd.c
+index 2c808fa..91da642 100644
+--- a/jdapistd.c
++++ b/jdapistd.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2010, 2015-2018, D. R. Commander.
++ * Copyright (C) 2010, 2015-2018, 2020, D. R. Commander.
+ * Copyright (C) 2015, Google, Inc.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+@@ -21,6 +21,8 @@
+ #include "jinclude.h"
+ #include "jdmainct.h"
+ #include "jdcoefct.h"
++#include "jdmaster.h"
++#include "jdmerge.h"
+ #include "jdsample.h"
+ #include "jmemsys.h"
+
+@@ -304,6 +306,16 @@ noop_quantize(j_decompress_ptr cinfo, JSAMPARRAY input_buf,
+ }
+
+
++/* Dummy postprocessing function used by jpeg_skip_scanlines() */
++LOCAL(void)
++noop_post_process (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
++ JDIMENSION *in_row_group_ctr,
++ JDIMENSION in_row_groups_avail, JSAMPARRAY output_buf,
++ JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
++{
++}
++
++
+ /*
+ * In some cases, it is best to call jpeg_read_scanlines() and discard the
+ * output, rather than skipping the scanlines, because this allows us to
+@@ -316,11 +328,17 @@ LOCAL(void)
+ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ {
+ JDIMENSION n;
++ my_master_ptr master = (my_master_ptr)cinfo->master;
+ void (*color_convert) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION input_row, JSAMPARRAY output_buf,
+ int num_rows) = NULL;
+ void (*color_quantize) (j_decompress_ptr cinfo, JSAMPARRAY input_buf,
+ JSAMPARRAY output_buf, int num_rows) = NULL;
++ void (*post_process_data) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
++ JDIMENSION *in_row_group_ctr,
++ JDIMENSION in_row_groups_avail,
++ JSAMPARRAY output_buf, JDIMENSION *out_row_ctr,
++ JDIMENSION out_rows_avail) = NULL;
+
+ if (cinfo->cconvert && cinfo->cconvert->color_convert) {
+ color_convert = cinfo->cconvert->color_convert;
+@@ -332,6 +350,12 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->cquantize->color_quantize = noop_quantize;
+ }
+
++ if (master->using_merged_upsample && cinfo->post &&
++ cinfo->post->post_process_data) {
++ post_process_data = cinfo->post->post_process_data;
++ cinfo->post->post_process_data = noop_post_process;
++ }
++
+ for (n = 0; n < num_lines; n++)
+ jpeg_read_scanlines(cinfo, NULL, 1);
+
+@@ -340,6 +364,9 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+
+ if (color_quantize)
+ cinfo->cquantize->color_quantize = color_quantize;
++
++ if (post_process_data)
++ cinfo->post->post_process_data = post_process_data;
+ }
+
+
+@@ -382,7 +409,7 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ {
+ my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
+ my_coef_ptr coef = (my_coef_ptr)cinfo->coef;
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_master_ptr master = (my_master_ptr)cinfo->master;
+ JDIMENSION i, x;
+ int y;
+ JDIMENSION lines_per_iMCU_row, lines_left_in_iMCU_row, lines_after_iMCU_row;
+@@ -445,8 +472,16 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ main_ptr->buffer_full = FALSE;
+ main_ptr->rowgroup_ctr = 0;
+ main_ptr->context_state = CTX_PREPARE_FOR_IMCU;
+- upsample->next_row_out = cinfo->max_v_samp_factor;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ if (master->using_merged_upsample) {
++ my_merged_upsample_ptr upsample =
++ (my_merged_upsample_ptr)cinfo->upsample;
++ upsample->spare_full = FALSE;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ } else {
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ upsample->next_row_out = cinfo->max_v_samp_factor;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ }
+ }
+
+ /* Skipping is much simpler when context rows are not required. */
+@@ -458,8 +493,16 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->output_scanline += lines_left_in_iMCU_row;
+ main_ptr->buffer_full = FALSE;
+ main_ptr->rowgroup_ctr = 0;
+- upsample->next_row_out = cinfo->max_v_samp_factor;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ if (master->using_merged_upsample) {
++ my_merged_upsample_ptr upsample =
++ (my_merged_upsample_ptr)cinfo->upsample;
++ upsample->spare_full = FALSE;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ } else {
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ upsample->next_row_out = cinfo->max_v_samp_factor;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ }
+ }
+ }
+
+@@ -494,7 +537,14 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->output_iMCU_row += lines_to_skip / lines_per_iMCU_row;
+ increment_simple_rowgroup_ctr(cinfo, lines_to_read);
+ }
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ if (master->using_merged_upsample) {
++ my_merged_upsample_ptr upsample =
++ (my_merged_upsample_ptr)cinfo->upsample;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ } else {
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ }
+ return num_lines;
+ }
+
+@@ -535,7 +585,13 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ * bit odd, since "rows_to_go" seems to be redundantly keeping track of
+ * output_scanline.
+ */
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ if (master->using_merged_upsample) {
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ } else {
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
++ }
+
+ /* Always skip the requested number of lines. */
+ return num_lines;
+diff --git a/jdmerge.c b/jdmerge.c
+index dff5a35..833ad67 100644
+--- a/jdmerge.c
++++ b/jdmerge.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+ * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
+- * Copyright (C) 2009, 2011, 2014-2015, D. R. Commander.
++ * Copyright (C) 2009, 2011, 2014-2015, 2020, D. R. Commander.
+ * Copyright (C) 2013, Linaro Limited.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+@@ -40,41 +40,13 @@
+ #define JPEG_INTERNALS
+ #include "jinclude.h"
+ #include "jpeglib.h"
++#include "jdmerge.h"
+ #include "jsimd.h"
+ #include "jconfigint.h"
+
+ #ifdef UPSAMPLE_MERGING_SUPPORTED
+
+
+-/* Private subobject */
+-
+-typedef struct {
+- struct jpeg_upsampler pub; /* public fields */
+-
+- /* Pointer to routine to do actual upsampling/conversion of one row group */
+- void (*upmethod) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+- JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf);
+-
+- /* Private state for YCC->RGB conversion */
+- int *Cr_r_tab; /* => table for Cr to R conversion */
+- int *Cb_b_tab; /* => table for Cb to B conversion */
+- JLONG *Cr_g_tab; /* => table for Cr to G conversion */
+- JLONG *Cb_g_tab; /* => table for Cb to G conversion */
+-
+- /* For 2:1 vertical sampling, we produce two output rows at a time.
+- * We need a "spare" row buffer to hold the second output row if the
+- * application provides just a one-row buffer; we also use the spare
+- * to discard the dummy last row if the image height is odd.
+- */
+- JSAMPROW spare_row;
+- boolean spare_full; /* T if spare buffer is occupied */
+-
+- JDIMENSION out_row_width; /* samples per output row */
+- JDIMENSION rows_to_go; /* counts rows remaining in image */
+-} my_upsampler;
+-
+-typedef my_upsampler *my_upsample_ptr;
+-
+ #define SCALEBITS 16 /* speediest right-shift on some machines */
+ #define ONE_HALF ((JLONG)1 << (SCALEBITS - 1))
+ #define FIX(x) ((JLONG)((x) * (1L << SCALEBITS) + 0.5))
+@@ -189,7 +161,7 @@ typedef my_upsampler *my_upsample_ptr;
+ LOCAL(void)
+ build_ycc_rgb_table(j_decompress_ptr cinfo)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ int i;
+ JLONG x;
+ SHIFT_TEMPS
+@@ -232,7 +204,7 @@ build_ycc_rgb_table(j_decompress_ptr cinfo)
+ METHODDEF(void)
+ start_pass_merged_upsample(j_decompress_ptr cinfo)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+
+ /* Mark the spare buffer empty */
+ upsample->spare_full = FALSE;
+@@ -254,7 +226,7 @@ merged_2v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
+ /* 2:1 vertical sampling case: may need a spare row. */
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ JSAMPROW work_ptrs[2];
+ JDIMENSION num_rows; /* number of rows returned to caller */
+
+@@ -305,7 +277,7 @@ merged_1v_upsample(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
+ /* 1:1 vertical sampling case: much easier, never need a spare row. */
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+
+ /* Just do the upsampling. */
+ (*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr,
+@@ -566,11 +538,11 @@ h2v2_merged_upsample_565D(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ GLOBAL(void)
+ jinit_merged_upsampler(j_decompress_ptr cinfo)
+ {
+- my_upsample_ptr upsample;
++ my_merged_upsample_ptr upsample;
+
+- upsample = (my_upsample_ptr)
++ upsample = (my_merged_upsample_ptr)
+ (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
+- sizeof(my_upsampler));
++ sizeof(my_merged_upsampler));
+ cinfo->upsample = (struct jpeg_upsampler *)upsample;
+ upsample->pub.start_pass = start_pass_merged_upsample;
+ upsample->pub.need_context_rows = FALSE;
+diff --git a/jdmerge.h b/jdmerge.h
+new file mode 100644
+index 0000000..b583396
+--- /dev/null
++++ b/jdmerge.h
+@@ -0,0 +1,47 @@
++/*
++ * jdmerge.h
++ *
++ * This file was part of the Independent JPEG Group's software:
++ * Copyright (C) 1994-1996, Thomas G. Lane.
++ * libjpeg-turbo Modifications:
++ * Copyright (C) 2020, D. R. Commander.
++ * For conditions of distribution and use, see the accompanying README.ijg
++ * file.
++ */
++
++#define JPEG_INTERNALS
++#include "jpeglib.h"
++
++#ifdef UPSAMPLE_MERGING_SUPPORTED
++
++
++/* Private subobject */
++
++typedef struct {
++ struct jpeg_upsampler pub; /* public fields */
++
++ /* Pointer to routine to do actual upsampling/conversion of one row group */
++ void (*upmethod) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
++ JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf);
++
++ /* Private state for YCC->RGB conversion */
++ int *Cr_r_tab; /* => table for Cr to R conversion */
++ int *Cb_b_tab; /* => table for Cb to B conversion */
++ JLONG *Cr_g_tab; /* => table for Cr to G conversion */
++ JLONG *Cb_g_tab; /* => table for Cb to G conversion */
++
++ /* For 2:1 vertical sampling, we produce two output rows at a time.
++ * We need a "spare" row buffer to hold the second output row if the
++ * application provides just a one-row buffer; we also use the spare
++ * to discard the dummy last row if the image height is odd.
++ */
++ JSAMPROW spare_row;
++ boolean spare_full; /* T if spare buffer is occupied */
++
++ JDIMENSION out_row_width; /* samples per output row */
++ JDIMENSION rows_to_go; /* counts rows remaining in image */
++} my_merged_upsampler;
++
++typedef my_merged_upsampler *my_merged_upsample_ptr;
++
++#endif /* UPSAMPLE_MERGING_SUPPORTED */
+diff --git a/jdmrg565.c b/jdmrg565.c
+index 1b87e37..53f1e16 100644
+--- a/jdmrg565.c
++++ b/jdmrg565.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+ * Copyright (C) 2013, Linaro Limited.
+- * Copyright (C) 2014-2015, 2018, D. R. Commander.
++ * Copyright (C) 2014-2015, 2018, 2020, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -19,7 +19,7 @@ h2v1_merged_upsample_565_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr;
+@@ -90,7 +90,7 @@ h2v1_merged_upsample_565D_internal(j_decompress_ptr cinfo,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr;
+@@ -163,7 +163,7 @@ h2v2_merged_upsample_565_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr0, outptr1;
+@@ -259,7 +259,7 @@ h2v2_merged_upsample_565D_internal(j_decompress_ptr cinfo,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr0, outptr1;
+diff --git a/jdmrgext.c b/jdmrgext.c
+index b1c27df..c9a44d8 100644
+--- a/jdmrgext.c
++++ b/jdmrgext.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2011, 2015, D. R. Commander.
++ * Copyright (C) 2011, 2015, 2020, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -25,7 +25,7 @@ h2v1_merged_upsample_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr;
+@@ -97,7 +97,7 @@ h2v2_merged_upsample_internal(j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION in_row_group_ctr,
+ JSAMPARRAY output_buf)
+ {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
+ register int y, cred, cgreen, cblue;
+ int cb, cr;
+ register JSAMPROW outptr0, outptr1;
+--
+2.25.1
+
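
The read_and_discard_scanlines() change in the patch above follows a save/patch/restore pattern: the real color conversion, quantization and (when merged upsampling is active) post-processing methods are swapped for no-ops while rows are discarded, then put back. A self-contained sketch of that pattern with invented names (decoder, real_convert, noop_convert) rather than the libjpeg structures:

#include <stdio.h>

/* Invented decoder object; in libjpeg-turbo the pointers live in
 * cinfo->cconvert, cinfo->cquantize and cinfo->post. */
struct decoder {
    void (*convert)(const char *row);
};

static void real_convert(const char *row) { printf("converted %s\n", row); }
static void noop_convert(const char *row) { (void)row; /* intentionally empty */ }

static void discard_rows(struct decoder *d, const char **rows, int n)
{
    /* Save the real method, install a no-op, do the work, restore. */
    void (*saved)(const char *row) = d->convert;
    d->convert = noop_convert;

    for (int i = 0; i < n; i++)
        d->convert(rows[i]);          /* decoding proceeds, output is dropped */

    d->convert = saved;
}

int main(void)
{
    struct decoder d = { real_convert };
    const char *rows[] = { "r0", "r1" };

    discard_rows(&d, rows, 2);        /* nothing printed for r0 / r1 */
    d.convert("r2");                  /* real conversion is back in place */
    return 0;
}
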
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2020-35538-2.patch b/meta/recipes-graphics/jpeg/files/CVE-2020-35538-2.patch
new file mode 100644
index 0000000000..f86175dff0
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2020-35538-2.patch
@@ -0,0 +1,400 @@
+From a46c111d9f3642f0ef3819e7298846ccc61869e0 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Mon, 27 Jul 2020 14:21:23 -0500
+Subject: [PATCH] Further jpeg_skip_scanlines() fixes
+
+- Introduce a partial image decompression regression test script that
+ validates the correctness of jpeg_skip_scanlines() and
+ jpeg_crop_scanlines() for a variety of cropping regions and libjpeg
+ settings.
+
+ This regression test catches the following issues:
+ #182, fixed in 5bc43c7
+ #237, fixed in 6e95c08
+ #244, fixed in 398c1e9
+ #441, fully fixed in this commit
+
+ It does not catch the following issues:
+ #194, fixed in 773040f
+ #244 (additional segfault), fixed in
+ 9120a24
+
+- Modify the libjpeg-turbo regression test suite (make test) so that it
+ checks for the issue reported in #441 (segfault in
+ jpeg_skip_scanlines() when used with 4:2:0 merged upsampling/color
+ conversion.)
+
+- Fix issues in jpeg_skip_scanlines() that caused incorrect output with
+ h2v2 (4:2:0) merged upsampling/color conversion. The previous commit
+ fixed the segfault reported in #441, but that was a symptom of a
+ larger problem. Because merged 4:2:0 upsampling uses a "spare row"
+ buffer, it is necessary to allow the upsampler to run when skipping
+ rows (fancy 4:2:0 upsampling, which uses context rows, also requires
+ this.) Otherwise, if skipping starts at an odd-numbered row, the
+ output image will be incorrect.
+
+- Throw an error if jpeg_skip_scanlines() is called with two-pass color
+ quantization enabled. With two-pass color quantization, the first
+ pass occurs within jpeg_start_decompress(), so subsequent calls to
+ jpeg_skip_scanlines() interfere with the multipass state and prevent
+ the second pass from occurring during subsequent calls to
+ jpeg_read_scanlines().
+
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/a46c111d9f3642f0ef3819e7298846ccc61869e0]
+CVE: CVE-2020-35538
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ CMakeLists.txt | 9 +++--
+ ChangeLog.md | 15 +++++---
+ croptest.in | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ jdapistd.c | 70 +++++++++++--------------------------
+ libjpeg.txt | 6 ++--
+ 5 files changed, 136 insertions(+), 59 deletions(-)
+ create mode 100755 croptest.in
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index aee74c9..de451f4 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -753,7 +753,7 @@ else()
+ set(MD5_PPM_3x2_IFAST fd283664b3b49127984af0a7f118fccd)
+ set(MD5_JPEG_420_ISLOW_ARI e986fb0a637a8d833d96e8a6d6d84ea1)
+ set(MD5_JPEG_444_ISLOW_PROGARI 0a8f1c8f66e113c3cf635df0a475a617)
+- set(MD5_PPM_420M_IFAST_ARI 72b59a99bcf1de24c5b27d151bde2437)
++ set(MD5_PPM_420M_IFAST_ARI 57251da28a35b46eecb7177d82d10e0e)
+ set(MD5_JPEG_420_ISLOW 9a68f56bc76e466aa7e52f415d0f4a5f)
+ set(MD5_PPM_420M_ISLOW_2_1 9f9de8c0612f8d06869b960b05abf9c9)
+ set(MD5_PPM_420M_ISLOW_15_8 b6875bc070720b899566cc06459b63b7)
+@@ -1131,7 +1131,7 @@ foreach(libtype ${TEST_LIBTYPES})
+
+ if(WITH_ARITH_DEC)
+ # CC: RGB->YCC SAMP: h2v2 merged IDCT: ifast ENT: arith
+- add_bittest(djpeg 420m-ifast-ari "-fast;-ppm"
++ add_bittest(djpeg 420m-ifast-ari "-fast;-skip;1,20;-ppm"
+ testout_420m_ifast_ari.ppm ${TESTIMAGES}/testimgari.jpg
+ ${MD5_PPM_420M_IFAST_ARI})
+
+@@ -1266,6 +1266,11 @@ endforeach()
+ add_custom_target(testclean COMMAND ${CMAKE_COMMAND} -P
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmakescripts/testclean.cmake)
+
++configure_file(croptest.in croptest @ONLY)
++add_custom_target(croptest
++ COMMAND echo croptest
++ COMMAND ${BASH} ${CMAKE_CURRENT_BINARY_DIR}/croptest)
++
+ if(WITH_TURBOJPEG)
+ configure_file(tjbenchtest.in tjbenchtest @ONLY)
+ configure_file(tjexampletest.in tjexampletest @ONLY)
+diff --git a/ChangeLog.md b/ChangeLog.md
+index 19d18fa..4562eff 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -54,11 +54,16 @@ a 16-bit binary PGM file into an RGB image buffer.
+ generated when using the `tjLoadImage()` function to load a 16-bit binary PPM
+ file into an extended RGB image buffer.
+
+-2. Fixed segfaults or "Corrupt JPEG data: premature end of data segment" errors
+-in `jpeg_skip_scanlines()` that occurred when decompressing 4:2:2 or 4:2:0 JPEG
+-images using the merged (non-fancy) upsampling algorithms (that is, when
+-setting `cinfo.do_fancy_upsampling` to `FALSE`.) 2.0.0[6] was a similar fix,
+-but it did not cover all cases.
++2. Fixed or worked around multiple issues with `jpeg_skip_scanlines()`:
++
++ - Fixed segfaults or "Corrupt JPEG data: premature end of data segment"
++errors in `jpeg_skip_scanlines()` that occurred when decompressing 4:2:2 or
++4:2:0 JPEG images using merged (non-fancy) upsampling/color conversion (that
++is, when setting `cinfo.do_fancy_upsampling` to `FALSE`.) 2.0.0[6] was a
++similar fix, but it did not cover all cases.
++ - `jpeg_skip_scanlines()` now throws an error if two-pass color
++quantization is enabled. Two-pass color quantization never worked properly
++with `jpeg_skip_scanlines()`, and the issues could not readily be fixed.
+
+
+ 2.0.3
+diff --git a/croptest.in b/croptest.in
+new file mode 100755
+index 0000000..7e3c293
+--- /dev/null
++++ b/croptest.in
+@@ -0,0 +1,95 @@
++#!/bin/bash
++
++set -u
++set -e
++trap onexit INT
++trap onexit TERM
++trap onexit EXIT
++
++onexit()
++{
++ if [ -d $OUTDIR ]; then
++ rm -rf $OUTDIR
++ fi
++}
++
++runme()
++{
++ echo \*\*\* $*
++ $*
++}
++
++IMAGE=vgl_6548_0026a.bmp
++WIDTH=128
++HEIGHT=95
++IMGDIR=@CMAKE_CURRENT_SOURCE_DIR@/testimages
++OUTDIR=`mktemp -d /tmp/__croptest_output.XXXXXX`
++EXEDIR=@CMAKE_CURRENT_BINARY_DIR@
++
++if [ -d $OUTDIR ]; then
++ rm -rf $OUTDIR
++fi
++mkdir -p $OUTDIR
++
++exec >$EXEDIR/croptest.log
++
++echo "============================================================"
++echo "$IMAGE ($WIDTH x $HEIGHT)"
++echo "============================================================"
++echo
++
++for PROGARG in "" -progressive; do
++
++ cp $IMGDIR/$IMAGE $OUTDIR
++ basename=`basename $IMAGE .bmp`
++ echo "------------------------------------------------------------"
++ echo "Generating test images"
++ echo "------------------------------------------------------------"
++ echo
++ runme $EXEDIR/cjpeg $PROGARG -grayscale -outfile $OUTDIR/${basename}_GRAY.jpg $IMGDIR/${basename}.bmp
++ runme $EXEDIR/cjpeg $PROGARG -sample 2x2 -outfile $OUTDIR/${basename}_420.jpg $IMGDIR/${basename}.bmp
++ runme $EXEDIR/cjpeg $PROGARG -sample 2x1 -outfile $OUTDIR/${basename}_422.jpg $IMGDIR/${basename}.bmp
++ runme $EXEDIR/cjpeg $PROGARG -sample 1x2 -outfile $OUTDIR/${basename}_440.jpg $IMGDIR/${basename}.bmp
++ runme $EXEDIR/cjpeg $PROGARG -sample 1x1 -outfile $OUTDIR/${basename}_444.jpg $IMGDIR/${basename}.bmp
++ echo
++
++ for NSARG in "" -nosmooth; do
++
++ for COLORSARG in "" "-colors 256 -dither none -onepass"; do
++
++ for Y in {0..16}; do
++
++ for H in {1..16}; do
++
++ X=$(( (Y*16)%128 ))
++ W=$(( WIDTH-X-7 ))
++ if [ $Y -le 15 ]; then
++ CROPSPEC="${W}x${H}+${X}+${Y}"
++ else
++ Y2=$(( HEIGHT-H ));
++ CROPSPEC="${W}x${H}+${X}+${Y2}"
++ fi
++
++ echo "------------------------------------------------------------"
++ echo $PROGARG $NSARG $COLORSARG -crop $CROPSPEC
++ echo "------------------------------------------------------------"
++ echo
++ for samp in GRAY 420 422 440 444; do
++ $EXEDIR/djpeg $NSARG $COLORSARG -rgb -outfile $OUTDIR/${basename}_${samp}_full.ppm $OUTDIR/${basename}_${samp}.jpg
++ convert -crop $CROPSPEC $OUTDIR/${basename}_${samp}_full.ppm $OUTDIR/${basename}_${samp}_ref.ppm
++ runme $EXEDIR/djpeg $NSARG $COLORSARG -crop $CROPSPEC -rgb -outfile $OUTDIR/${basename}_${samp}.ppm $OUTDIR/${basename}_${samp}.jpg
++ runme cmp $OUTDIR/${basename}_${samp}.ppm $OUTDIR/${basename}_${samp}_ref.ppm
++ done
++ echo
++
++ done
++
++ done
++
++ done
++
++ done
++
++done
++
++echo SUCCESS!
+diff --git a/jdapistd.c b/jdapistd.c
+index 91da642..c502909 100644
+--- a/jdapistd.c
++++ b/jdapistd.c
+@@ -306,16 +306,6 @@ noop_quantize(j_decompress_ptr cinfo, JSAMPARRAY input_buf,
+ }
+
+
+-/* Dummy postprocessing function used by jpeg_skip_scanlines() */
+-LOCAL(void)
+-noop_post_process (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+- JDIMENSION *in_row_group_ctr,
+- JDIMENSION in_row_groups_avail, JSAMPARRAY output_buf,
+- JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
+-{
+-}
+-
+-
+ /*
+ * In some cases, it is best to call jpeg_read_scanlines() and discard the
+ * output, rather than skipping the scanlines, because this allows us to
+@@ -329,16 +319,12 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ {
+ JDIMENSION n;
+ my_master_ptr master = (my_master_ptr)cinfo->master;
++ JSAMPARRAY scanlines = NULL;
+ void (*color_convert) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+ JDIMENSION input_row, JSAMPARRAY output_buf,
+ int num_rows) = NULL;
+ void (*color_quantize) (j_decompress_ptr cinfo, JSAMPARRAY input_buf,
+ JSAMPARRAY output_buf, int num_rows) = NULL;
+- void (*post_process_data) (j_decompress_ptr cinfo, JSAMPIMAGE input_buf,
+- JDIMENSION *in_row_group_ctr,
+- JDIMENSION in_row_groups_avail,
+- JSAMPARRAY output_buf, JDIMENSION *out_row_ctr,
+- JDIMENSION out_rows_avail) = NULL;
+
+ if (cinfo->cconvert && cinfo->cconvert->color_convert) {
+ color_convert = cinfo->cconvert->color_convert;
+@@ -350,23 +336,19 @@ read_and_discard_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->cquantize->color_quantize = noop_quantize;
+ }
+
+- if (master->using_merged_upsample && cinfo->post &&
+- cinfo->post->post_process_data) {
+- post_process_data = cinfo->post->post_process_data;
+- cinfo->post->post_process_data = noop_post_process;
++ if (master->using_merged_upsample && cinfo->max_v_samp_factor == 2) {
++ my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
++ scanlines = &upsample->spare_row;
+ }
+
+ for (n = 0; n < num_lines; n++)
+- jpeg_read_scanlines(cinfo, NULL, 1);
++ jpeg_read_scanlines(cinfo, scanlines, 1);
+
+ if (color_convert)
+ cinfo->cconvert->color_convert = color_convert;
+
+ if (color_quantize)
+ cinfo->cquantize->color_quantize = color_quantize;
+-
+- if (post_process_data)
+- cinfo->post->post_process_data = post_process_data;
+ }
+
+
+@@ -380,6 +362,12 @@ increment_simple_rowgroup_ctr(j_decompress_ptr cinfo, JDIMENSION rows)
+ {
+ JDIMENSION rows_left;
+ my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
++ my_master_ptr master = (my_master_ptr)cinfo->master;
++
++ if (master->using_merged_upsample && cinfo->max_v_samp_factor == 2) {
++ read_and_discard_scanlines(cinfo, rows);
++ return;
++ }
+
+ /* Increment the counter to the next row group after the skipped rows. */
+ main_ptr->rowgroup_ctr += rows / cinfo->max_v_samp_factor;
+@@ -410,11 +398,16 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ my_main_ptr main_ptr = (my_main_ptr)cinfo->main;
+ my_coef_ptr coef = (my_coef_ptr)cinfo->coef;
+ my_master_ptr master = (my_master_ptr)cinfo->master;
++ my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+ JDIMENSION i, x;
+ int y;
+ JDIMENSION lines_per_iMCU_row, lines_left_in_iMCU_row, lines_after_iMCU_row;
+ JDIMENSION lines_to_skip, lines_to_read;
+
++ /* Two-pass color quantization is not supported. */
++ if (cinfo->quantize_colors && cinfo->two_pass_quantize)
++ ERREXIT(cinfo, JERR_NOTIMPL);
++
+ if (cinfo->global_state != DSTATE_SCANNING)
+ ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);
+
+@@ -472,13 +465,7 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ main_ptr->buffer_full = FALSE;
+ main_ptr->rowgroup_ctr = 0;
+ main_ptr->context_state = CTX_PREPARE_FOR_IMCU;
+- if (master->using_merged_upsample) {
+- my_merged_upsample_ptr upsample =
+- (my_merged_upsample_ptr)cinfo->upsample;
+- upsample->spare_full = FALSE;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- } else {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ if (!master->using_merged_upsample) {
+ upsample->next_row_out = cinfo->max_v_samp_factor;
+ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+ }
+@@ -493,13 +480,7 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->output_scanline += lines_left_in_iMCU_row;
+ main_ptr->buffer_full = FALSE;
+ main_ptr->rowgroup_ctr = 0;
+- if (master->using_merged_upsample) {
+- my_merged_upsample_ptr upsample =
+- (my_merged_upsample_ptr)cinfo->upsample;
+- upsample->spare_full = FALSE;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- } else {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ if (!master->using_merged_upsample) {
+ upsample->next_row_out = cinfo->max_v_samp_factor;
+ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+ }
+@@ -537,14 +518,8 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ cinfo->output_iMCU_row += lines_to_skip / lines_per_iMCU_row;
+ increment_simple_rowgroup_ctr(cinfo, lines_to_read);
+ }
+- if (master->using_merged_upsample) {
+- my_merged_upsample_ptr upsample =
+- (my_merged_upsample_ptr)cinfo->upsample;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- } else {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
++ if (!master->using_merged_upsample)
+ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- }
+ return num_lines;
+ }
+
+@@ -585,13 +560,8 @@ jpeg_skip_scanlines(j_decompress_ptr cinfo, JDIMENSION num_lines)
+ * bit odd, since "rows_to_go" seems to be redundantly keeping track of
+ * output_scanline.
+ */
+- if (master->using_merged_upsample) {
+- my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
++ if (!master->using_merged_upsample)
+ upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- } else {
+- my_upsample_ptr upsample = (my_upsample_ptr)cinfo->upsample;
+- upsample->rows_to_go = cinfo->output_height - cinfo->output_scanline;
+- }
+
+ /* Always skip the requested number of lines. */
+ return num_lines;
+diff --git a/libjpeg.txt b/libjpeg.txt
+index c50cf90..c233ecb 100644
+--- a/libjpeg.txt
++++ b/libjpeg.txt
+@@ -3,7 +3,7 @@ USING THE IJG JPEG LIBRARY
+ This file was part of the Independent JPEG Group's software:
+ Copyright (C) 1994-2013, Thomas G. Lane, Guido Vollbeding.
+ libjpeg-turbo Modifications:
+-Copyright (C) 2010, 2014-2018, D. R. Commander.
++Copyright (C) 2010, 2014-2018, 2020, D. R. Commander.
+ Copyright (C) 2015, Google, Inc.
+ For conditions of distribution and use, see the accompanying README.ijg file.
+
+@@ -750,7 +750,9 @@ multiple rows in the JPEG image.
+
+ Suspending data sources are not supported by this function. Calling
+ jpeg_skip_scanlines() with a suspending data source will result in undefined
+-behavior.
++behavior. Two-pass color quantization is also not supported by this function.
++Calling jpeg_skip_scanlines() with two-pass color quantization enabled will
++result in an error.
+
+ jpeg_skip_scanlines() will not allow skipping past the bottom of the image. If
+ the value of num_lines is large enough to skip past the bottom of the image,
+--
+2.25.1
+
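
The follow-up patch above drops the no-op post-processing trick and instead decodes discarded rows into the merged upsampler's spare row, so every stage always has a real destination buffer. A tiny sketch of that idea — reusing a scratch row as the throwaway target instead of passing NULL — using invented names, not the libjpeg API:

#include <stdio.h>

/* Pretend row decoder; it always needs a writable destination, just as
 * merged 4:2:0 upsampling needs its spare row. Names are invented. */
static void decode_row(unsigned char *dst, int width)
{
    for (int i = 0; i < width; i++)
        dst[i] = (unsigned char)i;
}

/* Skip rows by decoding them into a scratch buffer and ignoring the
 * result, rather than passing NULL and hoping nothing dereferences it. */
static void skip_rows(unsigned char *spare, int width, int nrows)
{
    for (int n = 0; n < nrows; n++)
        decode_row(spare, width);
}

int main(void)
{
    unsigned char spare[16];

    skip_rows(spare, (int)sizeof spare, 3);
    printf("skipped 3 rows\n");
    return 0;
}
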
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2021-46822.patch b/meta/recipes-graphics/jpeg/files/CVE-2021-46822.patch
new file mode 100644
index 0000000000..68cf89e628
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2021-46822.patch
@@ -0,0 +1,133 @@
+From f35fd27ec641c42d6b115bfa595e483ec58188d2 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Tue, 6 Apr 2021 12:51:03 -0500
+Subject: [PATCH] tjLoadImage: Fix issues w/loading 16-bit PPMs/PGMs
+
+- The PPM reader now throws an error rather than segfaulting (due to a
+ buffer overrun) if an application attempts to load a 16-bit PPM file
+ into a grayscale uncompressed image buffer. No known applications
+ allowed that (not even the test applications in libjpeg-turbo),
+ because that mode of operation was never expected to work and did not
+ work under any circumstances. (In fact, it was necessary to modify
+ TJBench in order to reproduce the issue outside of a fuzzing
+ environment.) This was purely a matter of making the library bow out
+ gracefully rather than crash if an application tries to do something
+ really stupid.
+
+- The PPM reader now throws an error rather than generating incorrect
+ pixels if an application attempts to load a 16-bit PGM file into an
+ RGB uncompressed image buffer.
+
+- The PPM reader now correctly loads 16-bit PPM files into extended
+ RGB uncompressed image buffers. (Previously it generated incorrect
+ pixels unless the input colorspace was JCS_RGB or JCS_EXT_RGB.)
+
+The only way that users could have potentially encountered these issues
+was through the tjLoadImage() function. cjpeg and TJBench were
+unaffected.
+
+CVE: CVE-2021-46822
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/f35fd27ec641c42d6b115bfa595e483ec58188d2.patch]
+Comment: Refreshed hunks from ChangeLog.md
+ Refreshed hunks from rdppm.c
+
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+
+---
+ ChangeLog.md | 10 ++++++++++
+ rdppm.c | 26 ++++++++++++++++++++------
+ 2 files changed, 30 insertions(+), 6 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index 968969c6b..12e730a0e 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -44,6 +44,15 @@
+ that maximum value was less than 255. libjpeg-turbo 1.5.0 already included a
+ similar fix for binary PPM/PGM files with maximum values greater than 255.
+
++7. The PPM reader now throws an error, rather than segfaulting (due to a buffer
++overrun) or generating incorrect pixels, if an application attempts to use the
++`tjLoadImage()` function to load a 16-bit binary PPM file (a binary PPM file
++with a maximum value greater than 255) into a grayscale image buffer or to load
++a 16-bit binary PGM file into an RGB image buffer.
++
++8. Fixed an issue in the PPM reader that caused incorrect pixels to be
++generated when using the `tjLoadImage()` function to load a 16-bit binary PPM
++file into an extended RGB image buffer.
+
+ 2.0.3
+ =====
+diff --git a/rdppm.c b/rdppm.c
+index c4c937e8a..6ac8fdbf7 100644
+--- a/rdppm.c
++++ b/rdppm.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1991-1997, Thomas G. Lane.
+ * Modified 2009 by Bill Allombert, Guido Vollbeding.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2015-2017, 2020, D. R. Commander.
++ * Copyright (C) 2015-2017, 2020-2021, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -516,6 +516,11 @@ get_word_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
+ register JSAMPLE *rescale = source->rescale;
+ JDIMENSION col;
+ unsigned int maxval = source->maxval;
++ register int rindex = rgb_red[cinfo->in_color_space];
++ register int gindex = rgb_green[cinfo->in_color_space];
++ register int bindex = rgb_blue[cinfo->in_color_space];
++ register int aindex = alpha_index[cinfo->in_color_space];
++ register int ps = rgb_pixelsize[cinfo->in_color_space];
+
+ if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width))
+ ERREXIT(cinfo, JERR_INPUT_EOF);
+@@ -527,17 +532,20 @@ get_word_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
+ temp |= UCH(*bufferptr++);
+ if (temp > maxval)
+ ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);
+- *ptr++ = rescale[temp];
++ ptr[rindex] = rescale[temp];
+ temp = UCH(*bufferptr++) << 8;
+ temp |= UCH(*bufferptr++);
+ if (temp > maxval)
+ ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);
+- *ptr++ = rescale[temp];
++ ptr[gindex] = rescale[temp];
+ temp = UCH(*bufferptr++) << 8;
+ temp |= UCH(*bufferptr++);
+ if (temp > maxval)
+ ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);
+- *ptr++ = rescale[temp];
++ ptr[bindex] = rescale[temp];
++ if (aindex >= 0)
++ ptr[aindex] = 0xFF;
++ ptr += ps;
+ }
+ return 1;
+ }
+@@ -624,7 +632,10 @@ start_input_ppm(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
+ cinfo->in_color_space = JCS_GRAYSCALE;
+ TRACEMS2(cinfo, 1, JTRC_PGM, w, h);
+ if (maxval > 255) {
+- source->pub.get_pixel_rows = get_word_gray_row;
++ if (cinfo->in_color_space == JCS_GRAYSCALE)
++ source->pub.get_pixel_rows = get_word_gray_row;
++ else
++ ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
+ } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) &&
+ cinfo->in_color_space == JCS_GRAYSCALE) {
+ source->pub.get_pixel_rows = get_raw_row;
+@@ -657,7 +657,10 @@
+ cinfo->in_color_space = JCS_EXT_RGB;
+ TRACEMS2(cinfo, 1, JTRC_PPM, w, h);
+ if (maxval > 255) {
+- source->pub.get_pixel_rows = get_word_rgb_row;
++ if (IsExtRGB(cinfo->in_color_space))
++ source->pub.get_pixel_rows = get_word_rgb_row;
++ else
++ ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
+ } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) &&
+ (cinfo->in_color_space == JCS_EXT_RGB
+ #if RGB_RED == 0 && RGB_GREEN == 1 && RGB_BLUE == 2 && RGB_PIXELSIZE == 3
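
The core of the get_word_rgb_row() change above is that 16-bit samples are written at per-colorspace offsets and the destination pointer advances by the pixel size, instead of assuming a packed 3-byte RGB layout. A standalone sketch under that assumption, with a hypothetical struct layout in place of libjpeg-turbo's rgb_red[] / rgb_green[] / rgb_blue[] / alpha_index[] / rgb_pixelsize[] tables:

#include <stdio.h>

/* Hypothetical pixel layout; a < 0 means "no alpha byte". */
struct layout { int r, g, b, a, ps; };

/* Convert big-endian 16-bit PPM samples into whatever extended-RGB layout
 * the destination uses.  Toy rescale: keep only the high byte. */
static void put_row(unsigned char *dst, const unsigned char *src,
                    int npixels, const struct layout *l)
{
    for (int i = 0; i < npixels; i++, src += 6, dst += l->ps) {
        dst[l->r] = src[0];                 /* high byte of 16-bit red   */
        dst[l->g] = src[2];                 /* high byte of 16-bit green */
        dst[l->b] = src[4];                 /* high byte of 16-bit blue  */
        if (l->a >= 0)
            dst[l->a] = 0xFF;               /* opaque alpha, as the patch does */
    }
}

int main(void)
{
    const unsigned char src[6] = { 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00 };
    unsigned char bgra[4] = { 0 };
    struct layout l = { 2, 1, 0, 3, 4 };    /* B,G,R,A ordering, 4-byte pixels */

    put_row(bgra, src, 1, &l);
    printf("%02X %02X %02X %02X\n", bgra[0], bgra[1], bgra[2], bgra[3]);
    return 0;
}
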
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch
new file mode 100644
index 0000000000..6668f6e41d
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-1.patch
@@ -0,0 +1,97 @@
+From 9679473547874c472569d54fecce32b463999a9d Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Tue, 4 Apr 2023 19:06:20 -0500
+Subject: [PATCH] Decomp: Don't enable 2-pass color quant w/ RGB565
+
+The 2-pass color quantization algorithm assumes 3-sample pixels. RGB565
+is the only 3-component colorspace that doesn't have 3-sample pixels, so
+we need to treat it as a special case when determining whether to enable
+2-pass color quantization. Otherwise, attempting to initialize 2-pass
+color quantization with an RGB565 output buffer could cause
+prescan_quantize() to read from uninitialized memory and subsequently
+underflow/overflow the histogram array.
+
+djpeg is supposed to fail gracefully if both -rgb565 and -colors are
+specified, because none of its destination managers (image writers)
+support color quantization with RGB565. However, prescan_quantize() was
+called before that could occur. It is possible but very unlikely that
+these issues could have been reproduced in applications other than
+djpeg. The issues involve the use of two features (12-bit precision and
+RGB565) that are incompatible, and they also involve the use of two
+rarely-used legacy features (RGB565 and color quantization) that don't
+make much sense when combined.
+
+Fixes #668
+Fixes #671
+Fixes #680
+
+CVE: CVE-2023-2804
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/9679473547874c472569d54fecce32b463999a9d]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ ChangeLog.md | 6 ++++++
+ jdmaster.c | 5 +++--
+ jquant2.c | 5 +++--
+ 3 files changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index e605abe73..de0c4d0dd 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -1,3 +1,9 @@ quality values.
++9. Fixed an oversight in 1.4 beta1[8] that caused various segfaults and buffer
++overruns when attempting to decompress various specially-crafted malformed
++12-bit-per-component JPEG images using a 12-bit-per-component build of djpeg
++(`-DWITH_12BIT=1`) with both color quantization and RGB565 color conversion
++enabled.
++
+ 2.0.4
+ =====
+
+diff --git a/jdmaster.c b/jdmaster.c
+index b20906438..8d8ef9956 100644
+--- a/jdmaster.c
++++ b/jdmaster.c
+@@ -5,7 +5,7 @@
+ * Copyright (C) 1991-1997, Thomas G. Lane.
+ * Modified 2002-2009 by Guido Vollbeding.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2009-2011, 2016, D. R. Commander.
++ * Copyright (C) 2009-2011, 2016, 2023, D. R. Commander.
+ * Copyright (C) 2013, Linaro Limited.
+ * Copyright (C) 2015, Google, Inc.
+ * For conditions of distribution and use, see the accompanying README.ijg
+@@ -492,7 +492,8 @@ master_selection(j_decompress_ptr cinfo)
+ if (cinfo->raw_data_out)
+ ERREXIT(cinfo, JERR_NOTIMPL);
+ /* 2-pass quantizer only works in 3-component color space. */
+- if (cinfo->out_color_components != 3) {
++ if (cinfo->out_color_components != 3 ||
++ cinfo->out_color_space == JCS_RGB565) {
+ cinfo->enable_1pass_quant = TRUE;
+ cinfo->enable_external_quant = FALSE;
+ cinfo->enable_2pass_quant = FALSE;
+diff --git a/jquant2.c b/jquant2.c
+index 6570613bb..c760380fb 100644
+--- a/jquant2.c
++++ b/jquant2.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1991-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2009, 2014-2015, D. R. Commander.
++ * Copyright (C) 2009, 2014-2015, 2020, 2023, D. R. Commander.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+ *
+@@ -1230,7 +1230,8 @@ jinit_2pass_quantizer(j_decompress_ptr cinfo)
+ cquantize->error_limiter = NULL;
+
+ /* Make sure jdmaster didn't give me a case I can't handle */
+- if (cinfo->out_color_components != 3)
++ if (cinfo->out_color_components != 3 ||
++ cinfo->out_color_space == JCS_RGB565)
+ ERREXIT(cinfo, JERR_NOTIMPL);
+
+ /* Allocate the histogram/inverse colormap storage */
diff --git a/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch
new file mode 100644
index 0000000000..bcba0b513d
--- /dev/null
+++ b/meta/recipes-graphics/jpeg/files/CVE-2023-2804-2.patch
@@ -0,0 +1,75 @@
+From 0deab87e24ab3106d5332205f829d1846fa65001 Mon Sep 17 00:00:00 2001
+From: DRC <information@libjpeg-turbo.org>
+Date: Thu, 6 Apr 2023 18:33:41 -0500
+Subject: [PATCH] jpeg_crop_scanline: Fix calc w/sclg + 2x4,4x2 samp
+
+When computing the downsampled width for a particular component,
+jpeg_crop_scanline() needs to take into account the fact that the
+libjpeg code uses a combination of IDCT scaling and upsampling to
+implement 4x2 and 2x4 upsampling with certain decompression scaling
+factors. Failing to account for that led to incomplete upsampling of
+4x2- or 2x4-subsampled components, which caused the color converter to
+read from uninitialized memory. With 12-bit data precision, this caused
+a buffer overrun or underrun and subsequent segfault if the
+uninitialized memory contained a value that was outside of the valid
+sample range (because the color converter uses the value as an array
+index.)
+
+Fixes #669
+
+CVE: CVE-2023-2804
+Upstream-Status: Backport [https://github.com/libjpeg-turbo/libjpeg-turbo/commit/0deab87e24ab3106d5332205f829d1846fa65001]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+---
+ ChangeLog.md | 8 ++++++++
+ jdapistd.c | 10 ++++++----
+ 2 files changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/ChangeLog.md b/ChangeLog.md
+index de0c4d0dd..159bd1610 100644
+--- a/ChangeLog.md
++++ b/ChangeLog.md
+@@ -4,6 +4,14 @@ overruns when attempting to decompress various specially-crafted malformed
+ (`-DWITH_12BIT=1`) with both color quantization and RGB565 color conversion
+ enabled.
+
++10. Fixed an issue whereby `jpeg_crop_scanline()` sometimes miscalculated the
++downsampled width for components with 4x2 or 2x4 subsampling factors if
++decompression scaling was enabled. This caused the components to be upsampled
++incompletely, which caused the color converter to read from uninitialized
++memory. With 12-bit data precision, this caused a buffer overrun or underrun
++and subsequent segfault if the sample value read from uninitialized memory was
++outside of the valid sample range.
++
+ 2.0.4
+ =====
+
+diff --git a/jdapistd.c b/jdapistd.c
+index 628626254..eb577928c 100644
+--- a/jdapistd.c
++++ b/jdapistd.c
+@@ -4,7 +4,7 @@
+ * This file was part of the Independent JPEG Group's software:
+ * Copyright (C) 1994-1996, Thomas G. Lane.
+ * libjpeg-turbo Modifications:
+- * Copyright (C) 2010, 2015-2018, 2020, D. R. Commander.
++ * Copyright (C) 2010, 2015-2018, 2020, 2023, D. R. Commander.
+ * Copyright (C) 2015, Google, Inc.
+ * For conditions of distribution and use, see the accompanying README.ijg
+ * file.
+@@ -225,9 +225,11 @@ jpeg_crop_scanline(j_decompress_ptr cinfo, JDIMENSION *xoffset,
+ /* Set downsampled_width to the new output width. */
+ orig_downsampled_width = compptr->downsampled_width;
+ compptr->downsampled_width =
+- (JDIMENSION)jdiv_round_up((long)(cinfo->output_width *
+- compptr->h_samp_factor),
+- (long)cinfo->max_h_samp_factor);
++ (JDIMENSION)jdiv_round_up((long)cinfo->output_width *
++ (long)(compptr->h_samp_factor *
++ compptr->_DCT_scaled_size),
++ (long)(cinfo->max_h_samp_factor *
++ cinfo->_min_DCT_scaled_size));
+ if (compptr->downsampled_width < 2 && orig_downsampled_width >= 2)
+ reinit_upsampler = TRUE;
+
diff --git a/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb b/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb
index 3005a8a789..fda425c219 100644
--- a/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb
+++ b/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb
@@ -13,6 +13,11 @@ DEPENDS_append_x86_class-target = " nasm-native"
SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}-${PV}.tar.gz \
file://0001-libjpeg-turbo-fix-package_qa-error.patch \
file://CVE-2020-13790.patch \
+ file://CVE-2021-46822.patch \
+ file://CVE-2020-35538-1.patch \
+ file://CVE-2020-35538-2.patch \
+ file://CVE-2023-2804-1.patch \
+ file://CVE-2023-2804-2.patch \
"
SRC_URI[md5sum] = "d01d9e0c28c27bc0de9f4e2e8ff49855"
diff --git a/meta/recipes-graphics/libsdl2/libsdl2/CVE-2021-33657.patch b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2021-33657.patch
new file mode 100644
index 0000000000..a4ed7ab8e6
--- /dev/null
+++ b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2021-33657.patch
@@ -0,0 +1,38 @@
+From 8c91cf7dba5193f5ce12d06db1336515851c9ee9 Mon Sep 17 00:00:00 2001
+From: Sam Lantinga <slouken@libsdl.org>
+Date: Tue, 30 Nov 2021 12:36:46 -0800
+Subject: [PATCH] Always create a full 256-entry map in case color values are
+ out of range
+
+Fixes https://github.com/libsdl-org/SDL/issues/5042
+
+CVE: CVE-2021-33657
+Upstream-Status: Backport [https://github.com/libsdl-org/SDL/commit/8c91cf7dba5193f5ce12d06db1336515851c9ee9.patch]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/video/SDL_pixels.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/video/SDL_pixels.c b/src/video/SDL_pixels.c
+index ac04533c5d5..9bb02f771d0 100644
+--- a/src/video/SDL_pixels.c
++++ b/src/video/SDL_pixels.c
+@@ -947,7 +947,7 @@ Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical)
+ }
+ *identical = 0;
+ }
+- map = (Uint8 *) SDL_malloc(src->ncolors);
++ map = (Uint8 *) SDL_calloc(256, sizeof(Uint8));
+ if (map == NULL) {
+ SDL_OutOfMemory();
+ return (NULL);
+@@ -971,7 +971,7 @@ Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod,
+ SDL_Palette *pal = src->palette;
+
+ bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel);
+- map = (Uint8 *) SDL_malloc(pal->ncolors * bpp);
++ map = (Uint8 *) SDL_calloc(256, bpp);
+ if (map == NULL) {
+ SDL_OutOfMemory();
+ return (NULL);
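
Both hunks in the SDL patch above replace an ncolors-sized allocation with a zeroed 256-entry table, so an 8-bit pixel value that exceeds the palette size indexes harmless zeroes instead of memory past the end of the map. A minimal sketch of that defensive allocation, with a toy mapping rather than SDL's real color matching:

#include <stdio.h>
#include <stdlib.h>

/* Build a palette-translation map. Allocating all 256 zeroed entries means
 * an out-of-range 8-bit pixel value hits a harmless zero instead of memory
 * beyond an ncolors-sized buffer. */
static unsigned char *make_map(int ncolors)
{
    unsigned char *map = calloc(256, sizeof *map);

    if (map == NULL)
        return NULL;
    for (int i = 0; i < ncolors; i++)
        map[i] = (unsigned char)(255 - i);   /* toy mapping */
    return map;
}

int main(void)
{
    unsigned char *map = make_map(16);       /* palette holds only 16 colors */

    if (map == NULL)
        return 1;
    printf("%d %d\n", map[3], map[200]);     /* index 200 is out of range: 0 */
    free(map);
    return 0;
}
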
diff --git a/meta/recipes-graphics/libsdl2/libsdl2/CVE-2022-4743.patch b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2022-4743.patch
new file mode 100644
index 0000000000..b02a2169a6
--- /dev/null
+++ b/meta/recipes-graphics/libsdl2/libsdl2/CVE-2022-4743.patch
@@ -0,0 +1,38 @@
+From 00b67f55727bc0944c3266e2b875440da132ce4b Mon Sep 17 00:00:00 2001
+From: zhailiangliang <zhailiangliang@loongson.cn>
+Date: Wed, 21 Sep 2022 10:30:38 +0800
+Subject: [PATCH] Fix potential memory leak in GLES_CreateTexture
+
+
+CVE: CVE-2022-4743
+Upstream-Status: Backport [https://github.com/libsdl-org/SDL/commit/00b67f55727bc0944c3266e2b875440da132ce4b.patch]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+
+---
+ src/render/opengles/SDL_render_gles.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/render/opengles/SDL_render_gles.c b/src/render/opengles/SDL_render_gles.c
+index a5fbab309eda..ba08a46e2805 100644
+--- a/src/render/opengles/SDL_render_gles.c
++++ b/src/render/opengles/SDL_render_gles.c
+@@ -359,6 +359,9 @@ GLES_CreateTexture(SDL_Renderer * renderer, SDL_Texture * texture)
+ renderdata->glGenTextures(1, &data->texture);
+ result = renderdata->glGetError();
+ if (result != GL_NO_ERROR) {
++ if (texture->access == SDL_TEXTUREACCESS_STREAMING) {
++ SDL_free(data->pixels);
++ }
+ SDL_free(data);
+ return GLES_SetError("glGenTextures()", result);
+ }
+@@ -387,6 +390,9 @@ GLES_CreateTexture(SDL_Renderer * renderer, SDL_Texture * texture)
+
+ result = renderdata->glGetError();
+ if (result != GL_NO_ERROR) {
++ if (texture->access == SDL_TEXTUREACCESS_STREAMING) {
++ SDL_free(data->pixels);
++ }
+ SDL_free(data);
+ return GLES_SetError("glTexImage2D()", result);
+ }
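
The GLES_CreateTexture() fix above frees the streaming pixel buffer on each error return, not only on success. A compact sketch of that free-on-every-error-path pattern, using an invented struct texture and a stand-in for the failing GL calls:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct texture { int streaming; unsigned char *pixels; };

/* Hypothetical GL step that can fail; stands in for glGenTextures()
 * and glTexImage2D() error checks. Step 1 is made to fail. */
static int gl_step_ok(int step) { return step != 1; }

static int create_texture(struct texture *t, int w, int h)
{
    memset(t, 0, sizeof *t);
    t->streaming = 1;
    t->pixels = malloc((size_t)w * h * 4);
    if (t->pixels == NULL)
        return -1;

    for (int step = 0; step < 2; step++) {
        if (!gl_step_ok(step)) {
            /* Release the pixel buffer on every error path, not just on
             * success, so nothing leaks when a GL call fails. */
            if (t->streaming)
                free(t->pixels);
            t->pixels = NULL;
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    struct texture t;

    printf("%d\n", create_texture(&t, 64, 64));   /* -1, but no leak */
    return 0;
}
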
diff --git a/meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb b/meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb
index 8e77c18f2d..fa29bc99ac 100644
--- a/meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb
+++ b/meta/recipes-graphics/libsdl2/libsdl2_2.0.12.bb
@@ -21,6 +21,8 @@ SRC_URI = "http://www.libsdl.org/release/SDL2-${PV}.tar.gz \
file://directfb-spurious-curly-brace-missing-e.patch \
file://directfb-renderfillrect-fix.patch \
file://CVE-2020-14409-14410.patch \
+ file://CVE-2021-33657.patch \
+ file://CVE-2022-4743.patch \
"
S = "${WORKDIR}/SDL2-${PV}"
diff --git a/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2022-0135.patch b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2022-0135.patch
new file mode 100644
index 0000000000..4a277bd4d0
--- /dev/null
+++ b/meta/recipes-graphics/virglrenderer/virglrenderer/CVE-2022-0135.patch
@@ -0,0 +1,100 @@
+From 95e581fd181b213c2ed7cdc63f2abc03eaaa77ec Mon Sep 17 00:00:00 2001
+From: Gert Wollny <gert.wollny@collabora.com>
+Date: Tue, 30 Nov 2021 10:17:26 +0100
+Subject: [PATCH] vrend: Add test to resource OOB write and fix it
+
+v2: Also check that no depth != 1 has been sent when none is due
+
+Closes: #250
+Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
+Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
+
+https://gitlab.freedesktop.org/virgl/virglrenderer/-/commit/95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
+Upstream-Status: Backport
+CVE: CVE-2022-0135
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/vrend_renderer.c | 3 +++
+ tests/test_fuzzer_formats.c | 43 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 46 insertions(+)
+
+diff --git a/src/vrend_renderer.c b/src/vrend_renderer.c
+index 28f669727..357b81b20 100644
+--- a/src/vrend_renderer.c
++++ b/src/vrend_renderer.c
+@@ -7833,8 +7833,11 @@ static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
+ info->box->height) * elsize;
+ if (res->target == GL_TEXTURE_3D ||
+ res->target == GL_TEXTURE_2D_ARRAY ||
++ res->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY ||
+ res->target == GL_TEXTURE_CUBE_MAP_ARRAY)
+ send_size *= info->box->depth;
++ else if (need_temp && info->box->depth != 1)
++ return EINVAL;
+
+ if (need_temp) {
+ data = malloc(send_size);
+diff --git a/tests/test_fuzzer_formats.c b/tests/test_fuzzer_formats.c
+index 59d6fb671..2de9a9a3f 100644
+--- a/tests/test_fuzzer_formats.c
++++ b/tests/test_fuzzer_formats.c
+@@ -957,6 +957,48 @@ static void test_vrend_set_signle_abo_heap_overflow() {
+ virgl_renderer_submit_cmd((void *) cmd, ctx_id, 0xde);
+ }
+
++/* Test adapted from yaojun8558363@gmail.com:
++ * https://gitlab.freedesktop.org/virgl/virglrenderer/-/issues/250
++*/
++static void test_vrend_3d_resource_overflow() {
++
++ struct virgl_renderer_resource_create_args resource;
++ resource.handle = 0x4c474572;
++ resource.target = PIPE_TEXTURE_2D_ARRAY;
++ resource.format = VIRGL_FORMAT_Z24X8_UNORM;
++ resource.nr_samples = 2;
++ resource.last_level = 0;
++ resource.array_size = 3;
++ resource.bind = VIRGL_BIND_SAMPLER_VIEW;
++ resource.depth = 1;
++ resource.width = 8;
++ resource.height = 4;
++ resource.flags = 0;
++
++ virgl_renderer_resource_create(&resource, NULL, 0);
++ virgl_renderer_ctx_attach_resource(ctx_id, resource.handle);
++
++ uint32_t size = 0x400;
++ uint32_t cmd[size];
++ int i = 0;
++ cmd[i++] = (size - 1) << 16 | 0 << 8 | VIRGL_CCMD_RESOURCE_INLINE_WRITE;
++ cmd[i++] = resource.handle;
++ cmd[i++] = 0; // level
++ cmd[i++] = 0; // usage
++ cmd[i++] = 0; // stride
++ cmd[i++] = 0; // layer_stride
++ cmd[i++] = 0; // x
++ cmd[i++] = 0; // y
++ cmd[i++] = 0; // z
++ cmd[i++] = 8; // w
++ cmd[i++] = 4; // h
++ cmd[i++] = 3; // d
++ memset(&cmd[i], 0, size - i);
++
++ virgl_renderer_submit_cmd((void *) cmd, ctx_id, size);
++}
++
++
+ int main()
+ {
+ initialize_environment();
+@@ -979,6 +1021,7 @@ int main()
+ test_cs_nullpointer_deference();
+ test_vrend_set_signle_abo_heap_overflow();
+
++ test_vrend_3d_resource_overflow();
+
+ virgl_renderer_context_destroy(ctx_id);
+ virgl_renderer_cleanup(&cookie);
+--
+GitLab
+
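
The vrend_renderer.c hunk above makes the transfer size account for depth on array/3D targets and rejects a non-1 depth everywhere else, which is exactly what the new fuzzer test exercises. A rough sketch of that validation logic with simplified target names (the real code works on GL texture targets and iovec transfers):

#include <errno.h>
#include <stdio.h>

enum target { TEX_2D, TEX_2D_ARRAY, TEX_3D };

/* Compute the size of an incoming transfer; reject a depth the target
 * cannot legally have, instead of allocating a too-small temporary
 * buffer and writing past it. */
static long transfer_size(enum target t, long stride, long height, long depth)
{
    long size = stride * height;

    if (t == TEX_3D || t == TEX_2D_ARRAY)
        size *= depth;                  /* layered targets: depth counts */
    else if (depth != 1)
        return -EINVAL;                 /* plain 2D: depth must be 1 */
    return size;
}

int main(void)
{
    printf("%ld\n", transfer_size(TEX_2D_ARRAY, 32, 4, 3));   /* 384 */
    printf("%ld\n", transfer_size(TEX_2D, 32, 4, 3));         /* -EINVAL */
    return 0;
}
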
diff --git a/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb b/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb
index 31c45ef89c..8185d6f7e8 100644
--- a/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb
+++ b/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb
@@ -13,6 +13,7 @@ SRCREV = "7d204f3927be65fb3365dce01dbcd04d447a4985"
SRC_URI = "git://anongit.freedesktop.org/git/virglrenderer;branch=master \
file://0001-gallium-Expand-libc-check-to-be-platform-OS-check.patch \
file://0001-meson.build-use-python3-directly-for-python.patch \
+ file://CVE-2022-0135.patch \
"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-graphics/vulkan/assimp_5.0.1.bb b/meta/recipes-graphics/vulkan/assimp_5.0.1.bb
index 295ac12fc5..0774f37e31 100644
--- a/meta/recipes-graphics/vulkan/assimp_5.0.1.bb
+++ b/meta/recipes-graphics/vulkan/assimp_5.0.1.bb
@@ -8,7 +8,7 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=2119edef0916b0bd511cb3c731076271"
DEPENDS = "zlib"
-SRC_URI = "git://github.com/assimp/assimp.git;branch=assimp_5.0_release;protocol=https \
+SRC_URI = "git://github.com/assimp/assimp.git;nobranch=1;protocol=https \
file://0001-closes-https-github.com-assimp-assimp-issues-2733-up.patch \
file://0001-Use-ASSIMP_LIB_INSTALL_DIR-to-search-library.patch \
"
diff --git a/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch b/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch
new file mode 100644
index 0000000000..df204508e9
--- /dev/null
+++ b/meta/recipes-graphics/wayland/wayland/CVE-2021-3782.patch
@@ -0,0 +1,111 @@
+From 5eed6609619cc2e4eaa8618d11c15d442abf54be Mon Sep 17 00:00:00 2001
+From: Derek Foreman <derek.foreman@collabora.com>
+Date: Fri, 28 Jan 2022 13:18:37 -0600
+Subject: [PATCH] util: Limit size of wl_map
+
+Since server IDs are basically indistinguishable from really big client
+IDs at many points in the source, it's theoretically possible to overflow
+a map and either overflow server IDs into the client ID space, or grow
+client IDs into the server ID space. This would currently take a massive
+amount of RAM, but the definition of massive changes yearly.
+
+Prevent this by placing a ridiculous but arbitrary upper bound on the
+number of items we can put in a map: 0xF00000, somewhere over 15 million.
+This should satisfy pathological clients without restriction, but stays
+well clear of the 0xFF000000 transition point between server and client
+IDs. It will still take an improbable amount of RAM to hit this, and a
+client could still exhaust all RAM in this way, but our goal is to prevent
+overflow and undefined behaviour.
+
+Fixes #224
+
+Signed-off-by: Derek Foreman <derek.foreman@collabora.com>
+
+Upstream-Status: Backport
+CVE: CVE-2021-3782
+
+Reference to upstream patch:
+https://gitlab.freedesktop.org/wayland/wayland/-/commit/b19488c7154b902354cb26a27f11415d7799b0b2
+
+[DP: adjust context for wayland version 1.20.0]
+Signed-off-by: Dragos-Marian Panait <dragos.panait@windriver.com>
+---
+ src/wayland-private.h | 1 +
+ src/wayland-util.c | 25 +++++++++++++++++++++++--
+ 2 files changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/src/wayland-private.h b/src/wayland-private.h
+index 9bf8cb7..35dc40e 100644
+--- a/src/wayland-private.h
++++ b/src/wayland-private.h
+@@ -45,6 +45,7 @@
+ #define WL_MAP_SERVER_SIDE 0
+ #define WL_MAP_CLIENT_SIDE 1
+ #define WL_SERVER_ID_START 0xff000000
++#define WL_MAP_MAX_OBJECTS 0x00f00000
+ #define WL_CLOSURE_MAX_ARGS 20
+
+ struct wl_object {
+diff --git a/src/wayland-util.c b/src/wayland-util.c
+index d5973bf..3e45d19 100644
+--- a/src/wayland-util.c
++++ b/src/wayland-util.c
+@@ -195,6 +195,7 @@ wl_map_insert_new(struct wl_map *map, uint32_t flags, void *data)
+ union map_entry *start, *entry;
+ struct wl_array *entries;
+ uint32_t base;
++ uint32_t count;
+
+ if (map->side == WL_MAP_CLIENT_SIDE) {
+ entries = &map->client_entries;
+@@ -215,10 +216,25 @@ wl_map_insert_new(struct wl_map *map, uint32_t flags, void *data)
+ start = entries->data;
+ }
+
++ /* wl_array only grows, so if we have too many objects at
++ * this point there's no way to clean up. We could be more
++ * pro-active about trying to avoid this allocation, but
++ * it doesn't really matter because at this point there is
++ * nothing to be done but disconnect the client and delete
++ * the whole array either way.
++ */
++ count = entry - start;
++ if (count > WL_MAP_MAX_OBJECTS) {
++ /* entry->data is freshly malloced garbage, so we'd
++ * better make it a NULL so wl_map_for_each doesn't
++ * dereference it later. */
++ entry->data = NULL;
++ return 0;
++ }
+ entry->data = data;
+ entry->next |= (flags & 0x1) << 1;
+
+- return (entry - start) + base;
++ return count + base;
+ }
+
+ int
+@@ -235,6 +251,9 @@ wl_map_insert_at(struct wl_map *map, uint32_t flags, uint32_t i, void *data)
+ i -= WL_SERVER_ID_START;
+ }
+
++ if (i > WL_MAP_MAX_OBJECTS)
++ return -1;
++
+ count = entries->size / sizeof *start;
+ if (count < i)
+ return -1;
+@@ -269,8 +288,10 @@ wl_map_reserve_new(struct wl_map *map, uint32_t i)
+ i -= WL_SERVER_ID_START;
+ }
+
+- count = entries->size / sizeof *start;
++ if (i > WL_MAP_MAX_OBJECTS)
++ return -1;
+
++ count = entries->size / sizeof *start;
+ if (count < i)
+ return -1;
+
+--
+2.37.3
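
As a rough illustration of the bound chosen above (not part of the patch), the following standalone C check shows that an ID allocated at the new cap still sits well below the server ID range, using the two constants from wayland-private.h; the base value of 1 is a simplifying assumption:

    #include <assert.h>
    #include <stdint.h>

    #define WL_SERVER_ID_START  0xff000000
    #define WL_MAP_MAX_OBJECTS  0x00f00000

    int main(void)
    {
        uint32_t base = 1;                       /* client-side object IDs start above 0 */
        uint32_t max_id = WL_MAP_MAX_OBJECTS + base;
        assert(max_id < WL_SERVER_ID_START);     /* capped IDs never reach the server range */
        return 0;
    }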
diff --git a/meta/recipes-graphics/wayland/wayland_1.18.0.bb b/meta/recipes-graphics/wayland/wayland_1.18.0.bb
index 00be3aac27..e621abddbf 100644
--- a/meta/recipes-graphics/wayland/wayland_1.18.0.bb
+++ b/meta/recipes-graphics/wayland/wayland_1.18.0.bb
@@ -18,6 +18,7 @@ SRC_URI = "https://wayland.freedesktop.org/releases/${BPN}-${PV}.tar.xz \
file://0002-Do-not-hardcode-the-path-to-wayland-scanner.patch \
file://0001-build-Fix-strndup-detection-on-MinGW.patch \
file://0001-meson-tests-add-missing-dependencies-on-protocol-hea.patch \
+ file://CVE-2021-3782.patch \
"
SRC_URI[md5sum] = "23317697b6e3ff2e1ac8c5ba3ed57b65"
SRC_URI[sha256sum] = "4675a79f091020817a98fd0484e7208c8762242266967f55a67776936c2e294d"
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch
new file mode 100644
index 0000000000..fb61195225
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3554.patch
@@ -0,0 +1,58 @@
+From 8b51d1375a4dd6a7cf3a919da83d8e87e57e7333 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 2 Nov 2022 17:04:15 +0530
+Subject: [PATCH] CVE-2022-3554
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/1d11822601fd24a396b354fa616b04ed3df8b4ef]
+CVE: CVE-2022-3554
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+fix a memory leak in XRegisterIMInstantiateCallback
+
+Analysis:
+
+ _XimRegisterIMInstantiateCallback() opens an XIM and closes it using
+ the internal function pointers, but the internal close function does
+ not free the pointer to the XIM (this would be done in XCloseIM()).
+
+Report/patch:
+
+ Date: Mon, 03 Oct 2022 18:47:32 +0800
+ From: Po Lu <luangruo@yahoo.com>
+ To: xorg-devel@lists.x.org
+ Subject: Re: Yet another leak in Xlib
+
+ For reference, here's how I'm calling XRegisterIMInstantiateCallback:
+
+ XSetLocaleModifiers ("");
+ XRegisterIMInstantiateCallback (compositor.display,
+ XrmGetDatabase (compositor.display),
+ (char *) compositor.resource_name,
+ (char *) compositor.app_name,
+ IMInstantiateCallback, NULL);
+ and XMODIFIERS is:
+
+ @im=ibus
+
+Signed-off-by: Thomas E. Dickey <dickey@invisible-island.net>
+---
+ modules/im/ximcp/imInsClbk.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/modules/im/ximcp/imInsClbk.c b/modules/im/ximcp/imInsClbk.c
+index 961aaba..0a8a874 100644
+--- a/modules/im/ximcp/imInsClbk.c
++++ b/modules/im/ximcp/imInsClbk.c
+@@ -204,6 +204,9 @@ _XimRegisterIMInstantiateCallback(
+ if( xim ) {
+ lock = True;
+ xim->methods->close( (XIM)xim );
++ /* XIMs must be freed manually after being opened; close just
++ does the protocol to deinitialize the IM. */
++ XFree( xim );
+ lock = False;
+ icb->call = True;
+ callback( display, client_data, NULL );
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch
new file mode 100644
index 0000000000..855ce80e77
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2022-3555.patch
@@ -0,0 +1,38 @@
+From 8a368d808fec166b5fb3dfe6312aab22c7ee20af Mon Sep 17 00:00:00 2001
+From: Hodong <hodong@yozmos.com>
+Date: Thu, 20 Jan 2022 00:57:41 +0900
+Subject: [PATCH] Fix two memory leaks in _XFreeX11XCBStructure()
+
+Even when XCloseDisplay() was called, some memory was leaked.
+
+XCloseDisplay() calls _XFreeDisplayStructure(), which calls
+_XFreeX11XCBStructure().
+
+However, _XFreeX11XCBStructure() did not destroy the condition variables,
+resulting in the leaking of some 40 bytes.
+
+Signed-off-by: Hodong <hodong@yozmos.com>
+
+Upstream-Status: Backport from [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/8a368d808fec166b5fb3dfe6312aab22c7ee20af]
+CVE: CVE-2022-3555
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/xcb_disp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/src/xcb_disp.c b/src/xcb_disp.c
+index 70a602f4..e9becee3 100644
+--- a/src/xcb_disp.c
++++ b/src/xcb_disp.c
+@@ -102,6 +102,8 @@ void _XFreeX11XCBStructure(Display *dpy)
+ dpy->xcb->pending_requests = tmp->next;
+ free(tmp);
+ }
++ xcondition_clear(dpy->xcb->event_notify);
++ xcondition_clear(dpy->xcb->reply_notify);
+ xcondition_free(dpy->xcb->event_notify);
+ xcondition_free(dpy->xcb->reply_notify);
+ Xfree(dpy->xcb);
+--
+2.18.2
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch
new file mode 100644
index 0000000000..c724cf8fdd
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-3138.patch
@@ -0,0 +1,111 @@
+From 304a654a0d57bf0f00d8998185f0360332cfa36c Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Sat, 10 Jun 2023 16:30:07 -0700
+Subject: [PATCH] InitExt.c: Add bounds checks for extension request, event, &
+ error codes
+
+Fixes CVE-2023-3138: X servers could return values from XQueryExtension
+that would cause Xlib to write entries out-of-bounds of the arrays to
+store them, though this would only overwrite other parts of the Display
+struct, not outside the bounds allocated for that structure.
+
+Reported-by: Gregory James DUCK <gjduck@gmail.com>
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+CVE: CVE-2023-3138
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/304a654a0d57bf0f00d8998185f0360332cfa36c.patch]
+Signed-off-by: Poonam Jadhav <poonam.jadhav@kpit.com>
+---
+ src/InitExt.c | 42 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+diff --git a/src/InitExt.c b/src/InitExt.c
+index 4de46f15..afc00a6b 100644
+--- a/src/InitExt.c
++++ b/src/InitExt.c
+@@ -33,6 +33,18 @@ from The Open Group.
+ #include <X11/Xos.h>
+ #include <stdio.h>
+
++/* The X11 protocol spec reserves events 64 through 127 for extensions */
++#ifndef LastExtensionEvent
++#define LastExtensionEvent 127
++#endif
++
++/* The X11 protocol spec reserves requests 128 through 255 for extensions */
++#ifndef LastExtensionRequest
++#define FirstExtensionRequest 128
++#define LastExtensionRequest 255
++#endif
++
++
+ /*
+ * This routine is used to link a extension in so it will be called
+ * at appropriate times.
+@@ -242,6 +254,12 @@ WireToEventType XESetWireToEvent(
+ WireToEventType proc) /* routine to call when converting event */
+ {
+ register WireToEventType oldproc;
++ if (event_number < 0 ||
++ event_number > LastExtensionEvent) {
++ fprintf(stderr, "Xlib: ignoring invalid extension event %d\n",
++ event_number);
++ return (WireToEventType)_XUnknownWireEvent;
++ }
+ if (proc == NULL) proc = (WireToEventType)_XUnknownWireEvent;
+ LockDisplay (dpy);
+ oldproc = dpy->event_vec[event_number];
+@@ -263,6 +281,12 @@ WireToEventCookieType XESetWireToEventCookie(
+ )
+ {
+ WireToEventCookieType oldproc;
++ if (extension < FirstExtensionRequest ||
++ extension > LastExtensionRequest) {
++ fprintf(stderr, "Xlib: ignoring invalid extension opcode %d\n",
++ extension);
++ return (WireToEventCookieType)_XUnknownWireEventCookie;
++ }
+ if (proc == NULL) proc = (WireToEventCookieType)_XUnknownWireEventCookie;
+ LockDisplay (dpy);
+ oldproc = dpy->generic_event_vec[extension & 0x7F];
+@@ -284,6 +308,12 @@ CopyEventCookieType XESetCopyEventCookie(
+ )
+ {
+ CopyEventCookieType oldproc;
++ if (extension < FirstExtensionRequest ||
++ extension > LastExtensionRequest) {
++ fprintf(stderr, "Xlib: ignoring invalid extension opcode %d\n",
++ extension);
++ return (CopyEventCookieType)_XUnknownCopyEventCookie;
++ }
+ if (proc == NULL) proc = (CopyEventCookieType)_XUnknownCopyEventCookie;
+ LockDisplay (dpy);
+ oldproc = dpy->generic_event_copy_vec[extension & 0x7F];
+@@ -305,6 +335,12 @@ EventToWireType XESetEventToWire(
+ EventToWireType proc) /* routine to call when converting event */
+ {
+ register EventToWireType oldproc;
++ if (event_number < 0 ||
++ event_number > LastExtensionEvent) {
++ fprintf(stderr, "Xlib: ignoring invalid extension event %d\n",
++ event_number);
++ return (EventToWireType)_XUnknownNativeEvent;
++ }
+ if (proc == NULL) proc = (EventToWireType) _XUnknownNativeEvent;
+ LockDisplay (dpy);
+ oldproc = dpy->wire_vec[event_number];
+@@ -325,6 +361,12 @@ WireToErrorType XESetWireToError(
+ WireToErrorType proc) /* routine to call when converting error */
+ {
+ register WireToErrorType oldproc = NULL;
++ if (error_number < 0 ||
++ error_number > LastExtensionError) {
++ fprintf(stderr, "Xlib: ignoring invalid extension error %d\n",
++ error_number);
++ return (WireToErrorType)_XDefaultWireError;
++ }
+ if (proc == NULL) proc = (WireToErrorType)_XDefaultWireError;
+ LockDisplay (dpy);
+ if (!dpy->error_vec) {
+--
+GitLab
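
For context, the guard added to each XESet* entry point boils down to a range check against the protocol limits; the sketch below is a hypothetical standalone version (valid_extension_event is not an Xlib function):

    #include <stdio.h>

    #define LastExtensionEvent 127   /* X11 reserves events 64..127 for extensions */

    static int valid_extension_event(int event_number)
    {
        if (event_number < 0 || event_number > LastExtensionEvent) {
            fprintf(stderr, "Xlib: ignoring invalid extension event %d\n", event_number);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        return valid_extension_event(200) ? 0 : 1;   /* 200 is out of range and rejected */
    }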
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch
new file mode 100644
index 0000000000..dbdf096fc8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43785.patch
@@ -0,0 +1,63 @@
+From 6858d468d9ca55fb4c5fd70b223dbc78a3358a7f Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Sun, 17 Sep 2023 14:19:40 -0700
+Subject: [PATCH libX11 1/5] CVE-2023-43785: out-of-bounds memory access in
+ _XkbReadKeySyms()
+
+Make sure we allocate enough memory in the first place, and
+also handle error returns from _XkbReadBufferCopyKeySyms() when
+it detects out-of-bounds issues.
+
+Reported-by: Gregory James DUCK <gjduck@gmail.com>
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0001-CVE-2023-43785-out-of-bounds-memory-access-in-_XkbRe.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/6858d468d9ca55fb4c5fd70b223dbc78a3358a7f]
+CVE: CVE-2023-43785
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/xkb/XKBGetMap.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/src/xkb/XKBGetMap.c b/src/xkb/XKBGetMap.c
+index 2891d21e..31199e4a 100644
+--- a/src/xkb/XKBGetMap.c
++++ b/src/xkb/XKBGetMap.c
+@@ -182,7 +182,8 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ if (offset + newMap->nSyms >= map->size_syms) {
+ register int sz;
+
+- sz = map->size_syms + 128;
++ sz = offset + newMap->nSyms;
++ sz = ((sz + (unsigned) 128) / 128) * 128;
+ _XkbResizeArray(map->syms, map->size_syms, sz, KeySym);
+ if (map->syms == NULL) {
+ map->size_syms = 0;
+@@ -191,8 +192,9 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ map->size_syms = sz;
+ }
+ if (newMap->nSyms > 0) {
+- _XkbReadBufferCopyKeySyms(buf, (KeySym *) &map->syms[offset],
+- newMap->nSyms);
++ if (_XkbReadBufferCopyKeySyms(buf, (KeySym *) &map->syms[offset],
++ newMap->nSyms) == 0)
++ return BadLength;
+ offset += newMap->nSyms;
+ }
+ else {
+@@ -222,8 +224,10 @@ _XkbReadKeySyms(XkbReadBufferPtr buf, XkbDescPtr xkb, xkbGetMapReply *rep)
+ newSyms = XkbResizeKeySyms(xkb, i + rep->firstKeySym, tmp);
+ if (newSyms == NULL)
+ return BadAlloc;
+- if (newMap->nSyms > 0)
+- _XkbReadBufferCopyKeySyms(buf, newSyms, newMap->nSyms);
++ if (newMap->nSyms > 0) {
++ if (_XkbReadBufferCopyKeySyms(buf, newSyms, newMap->nSyms) == 0)
++ return BadLength;
++ }
+ else
+ newSyms[0] = NoSymbol;
+ oldMap->kt_index[0] = newMap->ktIndex[0];
+--
+2.39.3
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-1.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-1.patch
new file mode 100644
index 0000000000..31a99eb4ac
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-1.patch
@@ -0,0 +1,42 @@
+From 204c3393c4c90a29ed6bef64e43849536e863a86 Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 15:54:30 -0700
+Subject: [PATCH libX11 2/5] CVE-2023-43786: stack exhaustion from infinite
+ recursion in PutSubImage()
+
+When splitting a single line of pixels into chunks to send to the
+X server, be sure to take into account the number of bits per pixel,
+so we don't just loop forever trying to send more pixels than fit in
+the given request size and not breaking them down into a small enough
+chunk to fit.
+
+Fixes: "almost complete rewrite" (Dec. 12, 1987) from X11R2
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0002-CVE-2023-43786-stack-exhaustion-from-infinite-recurs.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/204c3393c4c90a29ed6bef64e43849536e863a86]
+CVE: CVE-2023-43786
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/PutImage.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/src/PutImage.c b/src/PutImage.c
+index 857ee916..a6db7b42 100644
+--- a/src/PutImage.c
++++ b/src/PutImage.c
+@@ -914,8 +914,9 @@ PutSubImage (
+ req_width, req_height - SubImageHeight,
+ dest_bits_per_pixel, dest_scanline_pad);
+ } else {
+- int SubImageWidth = (((Available << 3) / dest_scanline_pad)
+- * dest_scanline_pad) - left_pad;
++ int SubImageWidth = ((((Available << 3) / dest_scanline_pad)
++ * dest_scanline_pad) - left_pad)
++ / dest_bits_per_pixel;
+
+ PutSubImage(dpy, d, gc, image, req_xoffset, req_yoffset, x, y,
+ (unsigned int) SubImageWidth, 1,
+--
+2.39.3
+
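
A worked example of the width calculation with illustrative numbers (not taken from the patch): with 1024 bytes available and 32 bits per pixel, the old formula reported 8192 pixels as fitting, so PutSubImage recursed without ever shrinking the chunk; dividing by the pixel size yields a chunk that actually fits:

    #include <stdio.h>

    int main(void)
    {
        int available = 1024;                 /* bytes left in the request, assumed */
        int scanline_pad = 32, left_pad = 0, bits_per_pixel = 32;
        int old_width = ((available << 3) / scanline_pad) * scanline_pad - left_pad;
        int new_width = old_width / bits_per_pixel;
        printf("old SubImageWidth=%d pixels, corrected=%d pixels\n", old_width, new_width);
        return 0;
    }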
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-2.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-2.patch
new file mode 100644
index 0000000000..4800bedf41
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43786-2.patch
@@ -0,0 +1,46 @@
+From 73a37d5f2fcadd6540159b432a70d80f442ddf4a Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 15:55:04 -0700
+Subject: [PATCH libX11 3/5] XPutImage: clip images to maximum height & width
+ allowed by protocol
+
+The PutImage request specifies height & width of the image as CARD16
+(unsigned 16-bit integer), same as the maximum dimensions of an X11
+Drawable, which the image is being copied to.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0003-XPutImage-clip-images-to-maximum-height-width-allowe.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/73a37d5f2fcadd6540159b432a70d80f442ddf4a]
+CVE: CVE-2023-43786
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/PutImage.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/src/PutImage.c b/src/PutImage.c
+index a6db7b42..ba411e36 100644
+--- a/src/PutImage.c
++++ b/src/PutImage.c
+@@ -30,6 +30,7 @@ in this Software without prior written authorization from The Open Group.
+ #include "Xlibint.h"
+ #include "Xutil.h"
+ #include <stdio.h>
++#include <limits.h>
+ #include "Cr.h"
+ #include "ImUtil.h"
+ #include "reallocarray.h"
+@@ -962,6 +963,10 @@ XPutImage (
+ height = image->height - req_yoffset;
+ if ((width <= 0) || (height <= 0))
+ return 0;
++ if (width > USHRT_MAX)
++ width = USHRT_MAX;
++ if (height > USHRT_MAX)
++ height = USHRT_MAX;
+
+ if ((image->bits_per_pixel == 1) || (image->format != ZPixmap)) {
+ dest_bits_per_pixel = 1;
+--
+2.39.3
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-1.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-1.patch
new file mode 100644
index 0000000000..d35d96c4dc
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-1.patch
@@ -0,0 +1,52 @@
+From b4031fc023816aca07fbd592ed97010b9b48784b Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Thu, 7 Sep 2023 16:12:27 -0700
+Subject: [PATCH libX11 4/5] XCreatePixmap: trigger BadValue error for
+ out-of-range dimensions
+
+The CreatePixmap request specifies height & width of the image as CARD16
+(unsigned 16-bit integer), so if either is larger than that, set it to 0
+so the X server returns a BadValue error as the protocol requires.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0004-XCreatePixmap-trigger-BadValue-error-for-out-of-rang.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/b4031fc023816aca07fbd592ed97010b9b48784b]
+CVE: CVE-2023-43787
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/CrPixmap.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/src/CrPixmap.c b/src/CrPixmap.c
+index cdf31207..3cb2ca6d 100644
+--- a/src/CrPixmap.c
++++ b/src/CrPixmap.c
+@@ -28,6 +28,7 @@ in this Software without prior written authorization from The Open Group.
+ #include <config.h>
+ #endif
+ #include "Xlibint.h"
++#include <limits.h>
+
+ #ifdef USE_DYNAMIC_XCURSOR
+ void
+@@ -47,6 +48,16 @@ Pixmap XCreatePixmap (
+ Pixmap pid;
+ register xCreatePixmapReq *req;
+
++ /*
++ * Force a BadValue X Error if the requested dimensions are larger
++ * than the X11 protocol has room for, since that's how callers expect
++ * to get notified of errors.
++ */
++ if (width > USHRT_MAX)
++ width = 0;
++ if (height > USHRT_MAX)
++ height = 0;
++
+ LockDisplay(dpy);
+ GetReq(CreatePixmap, req);
+ req->drawable = d;
+--
+2.39.3
+
diff --git a/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-2.patch b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-2.patch
new file mode 100644
index 0000000000..110bd445df
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/libx11/CVE-2023-43787-2.patch
@@ -0,0 +1,64 @@
+From 7916869d16bdd115ac5be30a67c3749907aea6a0 Mon Sep 17 00:00:00 2001
+From: Yair Mizrahi <yairm@jfrog.com>
+Date: Thu, 7 Sep 2023 16:15:32 -0700
+Subject: [PATCH libX11 5/5] CVE-2023-43787: Integer overflow in XCreateImage()
+ leading to a heap overflow
+
+When the format is `Pixmap` it calculates the size of the image data as:
+ ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
+There is no validation on the `width` of the image, and so this
+calculation exceeds the capacity of a 4-byte integer, causing an overflow.
+
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libx11/tree/debian/patches/0005-CVE-2023-43787-Integer-overflow-in-XCreateImage-lead.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.freedesktop.org/xorg/lib/libx11/-/commit/7916869d16bdd115ac5be30a67c3749907aea6a0]
+CVE: CVE-2023-43787
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/ImUtil.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/src/ImUtil.c b/src/ImUtil.c
+index 36f08a03..fbfad33e 100644
+--- a/src/ImUtil.c
++++ b/src/ImUtil.c
+@@ -30,6 +30,7 @@ in this Software without prior written authorization from The Open Group.
+ #include <X11/Xlibint.h>
+ #include <X11/Xutil.h>
+ #include <stdio.h>
++#include <limits.h>
+ #include "ImUtil.h"
+
+ static int _XDestroyImage(XImage *);
+@@ -361,13 +362,22 @@ XImage *XCreateImage (
+ /*
+ * compute per line accelerator.
+ */
+- {
+- if (format == ZPixmap)
++ if (format == ZPixmap) {
++ if ((INT_MAX / bits_per_pixel) < width) {
++ Xfree(image);
++ return NULL;
++ }
++
+ min_bytes_per_line =
+- ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
+- else
++ ROUNDUP((bits_per_pixel * width), image->bitmap_pad);
++ } else {
++ if ((INT_MAX - offset) < width) {
++ Xfree(image);
++ return NULL;
++ }
++
+ min_bytes_per_line =
+- ROUNDUP((width + offset), image->bitmap_pad);
++ ROUNDUP((width + offset), image->bitmap_pad);
+ }
+ if (image_bytes_per_line == 0) {
+ image->bytes_per_line = min_bytes_per_line;
+--
+2.39.3
+
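
The essence of the ZPixmap branch above is a pre-multiplication overflow guard; a minimal standalone sketch follows (row_bits_would_overflow is a hypothetical name, not Xlib API):

    #include <limits.h>

    /* Nonzero when bits_per_pixel * width would not fit in an int. */
    static int row_bits_would_overflow(int bits_per_pixel, int width)
    {
        return bits_per_pixel > 0 && width > INT_MAX / bits_per_pixel;
    }

    int main(void)
    {
        return row_bits_would_overflow(32, 0x10000000) ? 0 : 1;   /* overflows, guard fires */
    }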
diff --git a/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb b/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb
index ff2a6f7265..248889a1d4 100644
--- a/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb
+++ b/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb
@@ -16,6 +16,14 @@ SRC_URI += "file://Fix-hanging-issue-in-_XReply.patch \
file://CVE-2020-14344.patch \
file://CVE-2020-14363.patch \
file://CVE-2021-31535.patch \
+ file://CVE-2022-3554.patch \
+ file://CVE-2022-3555.patch \
+ file://CVE-2023-3138.patch \
+ file://CVE-2023-43785.patch \
+ file://CVE-2023-43786-1.patch \
+ file://CVE-2023-43786-2.patch \
+ file://CVE-2023-43787-1.patch \
+ file://CVE-2023-43787-2.patch \
"
SRC_URI[md5sum] = "55adbfb6d4370ecac5e70598c4e7eed2"
diff --git a/meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb b/meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb
index fda8e32d2c..4694f911be 100644
--- a/meta/recipes-graphics/xorg-lib/libxpm_3.5.13.bb
+++ b/meta/recipes-graphics/xorg-lib/libxpm_3.5.17.bb
@@ -11,17 +11,18 @@ an extension of the monochrome XBM bitmap specificied in the X \
protocol."
LICENSE = "MIT"
-LIC_FILES_CHKSUM = "file://COPYING;md5=51f4270b012ecd4ab1a164f5f4ed6cf7"
+LIC_FILES_CHKSUM = "file://COPYING;md5=903942ebc9d807dfb68540f40bae5aff"
DEPENDS += "libxext libsm libxt gettext-native"
PE = "1"
XORG_PN = "libXpm"
+XORG_EXT = "tar.xz"
+EXTRA_OECONF += "--disable-open-zfile"
PACKAGES =+ "sxpm cxpm"
FILES_cxpm = "${bindir}/cxpm"
FILES_sxpm = "${bindir}/sxpm"
-SRC_URI[md5sum] = "6f0ecf8d103d528cfc803aa475137afa"
-SRC_URI[sha256sum] = "9cd1da57588b6cb71450eff2273ef6b657537a9ac4d02d0014228845b935ac25"
+SRC_URI[sha256sum] = "64b31f81019e7d388c822b0b28af8d51c4622b83f1f0cb6fa3fc95e271226e43"
BBCLASSEXTEND = "native"
diff --git a/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch b/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch
new file mode 100644
index 0000000000..d54ae16b33
--- /dev/null
+++ b/meta/recipes-graphics/xorg-lib/pixman/CVE-2022-44638.patch
@@ -0,0 +1,34 @@
+CVE: CVE-2022-44638
+Upstream-Status: Backport
+Signed-off-by: Ross Burton <ross.burton@arm.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+
+From a1f88e842e0216a5b4df1ab023caebe33c101395 Mon Sep 17 00:00:00 2001
+From: Matt Turner <mattst88@gmail.com>
+Date: Wed, 2 Nov 2022 12:07:32 -0400
+Subject: [PATCH] Avoid integer overflow leading to out-of-bounds write
+
+Thanks to Maddie Stone and Google's Project Zero for discovering this
+issue, providing a proof-of-concept, and a great analysis.
+
+Closes: https://gitlab.freedesktop.org/pixman/pixman/-/issues/63
+---
+ pixman/pixman-trap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pixman/pixman-trap.c b/pixman/pixman-trap.c
+index 91766fd..7560405 100644
+--- a/pixman/pixman-trap.c
++++ b/pixman/pixman-trap.c
+@@ -74,7 +74,7 @@ pixman_sample_floor_y (pixman_fixed_t y,
+
+ if (f < Y_FRAC_FIRST (n))
+ {
+- if (pixman_fixed_to_int (i) == 0x8000)
++ if (pixman_fixed_to_int (i) == 0xffff8000)
+ {
+ f = 0; /* saturate */
+ }
+--
+GitLab
+
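
Why the constant changes from 0x8000 to 0xffff8000: pixman_fixed_to_int() of the most negative 16.16 coordinate yields the sign-extended value -32768, which only matches the saturate check once the literal is the unsigned form of that value. A standalone sketch using the macro definition assumed from pixman.h:

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t pixman_fixed_t;
    #define pixman_fixed_to_int(f) ((int) ((f) >> 16))

    int main(void)
    {
        pixman_fixed_t i = INT32_MIN;          /* most negative coordinate */
        printf("fixed_to_int = %d, matches -32768 (0xffff8000): %d\n",
               pixman_fixed_to_int(i),
               pixman_fixed_to_int(i) == -32768);
        return 0;
    }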
diff --git a/meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb b/meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb
index 22e19ba069..5873c19bab 100644
--- a/meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb
+++ b/meta/recipes-graphics/xorg-lib/pixman_0.38.4.bb
@@ -10,6 +10,7 @@ DEPENDS = "zlib"
SRC_URI = "https://www.cairographics.org/releases/${BP}.tar.gz \
file://0001-ARM-qemu-related-workarounds-in-cpu-features-detecti.patch \
file://0001-test-utils-Check-for-FE_INVALID-definition-before-us.patch \
+ file://CVE-2022-44638.patch \
"
SRC_URI[md5sum] = "267a7af290f93f643a1bc74490d9fdd1"
SRC_URI[sha256sum] = "da66d6fd6e40aee70f7bd02e4f8f76fc3f006ec879d346bae6a723025cfbdde7"
diff --git a/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc b/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
index a566eaa45e..1e8525d874 100644
--- a/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
+++ b/meta/recipes-graphics/xorg-lib/xorg-lib-common.inc
@@ -6,8 +6,9 @@ LICENSE = "MIT-X"
DEPENDS = "util-macros"
XORG_PN = "${BPN}"
+XORG_EXT ?= "tar.bz2"
-SRC_URI = "${XORG_MIRROR}/individual/lib/${XORG_PN}-${PV}.tar.bz2"
+SRC_URI = "${XORG_MIRROR}/individual/lib/${XORG_PN}-${PV}.${XORG_EXT}"
S = "${WORKDIR}/${XORG_PN}-${PV}"
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3550.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3550.patch
new file mode 100644
index 0000000000..efec7b6b4e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3550.patch
@@ -0,0 +1,40 @@
+From d2dcbdc67c96c84dff301505072b0b7b022f1a14 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Sun, 4 Dec 2022 17:40:21 +0000
+Subject: [PATCH 1/3] xkb: proof GetCountedString against request length
+ attacks
+
+GetCountedString did a check for the whole string to be within the
+request buffer but not for the initial 2 bytes that contain the length
+field. A swapped client could send a malformed request to trigger a
+swaps() on those bytes, writing into random memory.
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://cgit.freedesktop.org/xorg/xserver/commit/?id=11beef0b7f1ed290348e45618e5fa0d2bffcb72e]
+CVE: CVE-2022-3550
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+
+---
+ xkb/xkb.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/xkb/xkb.c b/xkb/xkb.c
+index 68c59df..bf8aaa3 100644
+--- a/xkb/xkb.c
++++ b/xkb/xkb.c
+@@ -5138,6 +5138,11 @@ _GetCountedString(char **wire_inout, ClientPtr client, char **str)
+ CARD16 len;
+
+ wire = *wire_inout;
++
++ if (client->req_len <
++ bytes_to_int32(wire + 2 - (char *) client->requestBuffer))
++ return BadValue;
++
+ len = *(CARD16 *) wire;
+ if (client->swapped) {
+ swaps(&len);
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3551.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3551.patch
new file mode 100644
index 0000000000..a3b977aac9
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3551.patch
@@ -0,0 +1,64 @@
+From d3787290f56165f5656ddd2123dbf676a32d0a68 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Sun, 4 Dec 2022 17:44:00 +0000
+Subject: [PATCH 2/3] xkb: fix some possible memleaks in XkbGetKbdByName
+
+GetComponentByName returns an allocated string, so let's free that if we
+fail somewhere.
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://cgit.freedesktop.org/xorg/xserver/commit/?id=18f91b950e22c2a342a4fbc55e9ddf7534a707d2]
+CVE: CVE-2022-3551
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+
+---
+ xkb/xkb.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+diff --git a/xkb/xkb.c b/xkb/xkb.c
+index bf8aaa3..f79d306 100644
+--- a/xkb/xkb.c
++++ b/xkb/xkb.c
+@@ -5908,19 +5908,31 @@ ProcXkbGetKbdByName(ClientPtr client)
+ xkb = dev->key->xkbInfo->desc;
+ status = Success;
+ str = (unsigned char *) &stuff[1];
+- if (GetComponentSpec(&str, TRUE, &status)) /* keymap, unsupported */
+- return BadMatch;
++ {
++ char *keymap = GetComponentSpec(&str, TRUE, &status); /* keymap, unsupported */
++ if (keymap) {
++ free(keymap);
++ return BadMatch;
++ }
++ }
+ names.keycodes = GetComponentSpec(&str, TRUE, &status);
+ names.types = GetComponentSpec(&str, TRUE, &status);
+ names.compat = GetComponentSpec(&str, TRUE, &status);
+ names.symbols = GetComponentSpec(&str, TRUE, &status);
+ names.geometry = GetComponentSpec(&str, TRUE, &status);
+- if (status != Success)
+- return status;
+- len = str - ((unsigned char *) stuff);
+- if ((XkbPaddedSize(len) / 4) != stuff->length)
+- return BadLength;
++ if (status == Success) {
++ len = str - ((unsigned char *) stuff);
++ if ((XkbPaddedSize(len) / 4) != stuff->length)
++ status = BadLength;
++ }
+
++ if (status != Success) {
++ free(names.keycodes);
++ free(names.types);
++ free(names.compat);
++ free(names.symbols);
++ free(names.geometry);
++ }
+ CHK_MASK_LEGAL(0x01, stuff->want, XkbGBN_AllComponentsMask);
+ CHK_MASK_LEGAL(0x02, stuff->need, XkbGBN_AllComponentsMask);
+
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3553.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3553.patch
new file mode 100644
index 0000000000..94cea77edc
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-3553.patch
@@ -0,0 +1,49 @@
+From 57ad2c03730d56f8432b6d66b29c0e5a9f9b1ec2 Mon Sep 17 00:00:00 2001
+From: Jeremy Huddleston Sequoia <jeremyhu@apple.com>
+Date: Sun, 4 Dec 2022 17:46:18 +0000
+Subject: [PATCH 3/3] xquartz: Fix a possible crash when editing the
+ Application menu due to mutating immutable arrays
+
+Crashing on exception: -[__NSCFArray replaceObjectAtIndex:withObject:]: mutating method sent to immutable object
+
+Application Specific Backtrace 0:
+0 CoreFoundation 0x00007ff80d2c5e9b __exceptionPreprocess + 242
+1 libobjc.A.dylib 0x00007ff80d027e48 objc_exception_throw + 48
+2 CoreFoundation 0x00007ff80d38167b _CFThrowFormattedException + 194
+3 CoreFoundation 0x00007ff80d382a25 -[__NSCFArray removeObjectAtIndex:].cold.1 + 0
+4 CoreFoundation 0x00007ff80d2e6c0b -[__NSCFArray replaceObjectAtIndex:withObject:] + 119
+5 X11.bin 0x00000001003180f9 -[X11Controller tableView:setObjectValue:forTableColumn:row:] + 169
+
+Fixes: https://github.com/XQuartz/XQuartz/issues/267
+Signed-off-by: Jeremy Huddleston Sequoia <jeremyhu@apple.com>
+
+Upstream-Status: Backport [https://cgit.freedesktop.org/xorg/xserver/commit/?id=dfd057996b26420309c324ec844a5ba6dd07eda3]
+CVE: CVE-2022-3553
+Signed-off-by: Minjae Kim <flowergom@gmail.com>
+
+---
+ hw/xquartz/X11Controller.m | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/hw/xquartz/X11Controller.m b/hw/xquartz/X11Controller.m
+index 3efda50..9870ff2 100644
+--- a/hw/xquartz/X11Controller.m
++++ b/hw/xquartz/X11Controller.m
+@@ -467,8 +467,12 @@ extern char *bundle_id_prefix;
+ self.table_apps = table_apps;
+
+ NSArray * const apps = self.apps;
+- if (apps != nil)
+- [table_apps addObjectsFromArray:apps];
++
++ if (apps != nil) {
++ for (NSArray <NSString *> * row in apps) {
++ [table_apps addObject:row.mutableCopy];
++ }
++ }
+
+ columns = [apps_table tableColumns];
+ [[columns objectAtIndex:0] setIdentifier:@"0"];
+--
+2.17.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-4283.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-4283.patch
new file mode 100644
index 0000000000..3f6b68fea8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-4283.patch
@@ -0,0 +1,39 @@
+From ccdd431cd8f1cabae9d744f0514b6533c438908c Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 5 Dec 2022 15:55:54 +1000
+Subject: [PATCH] xkb: reset the radio_groups pointer to NULL after freeing it
+
+Unlike other elements of the keymap, this pointer was freed but not
+reset. On a subsequent XkbGetKbdByName request, the server may access
+already freed memory.
+
+CVE-2022-4283, ZDI-CAN-19530
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/ccdd431cd8f1cabae9d744f0514b6533c438908c]
+CVE: CVE-2022-4283
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ xkb/xkbUtils.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/xkb/xkbUtils.c b/xkb/xkbUtils.c
+index 8975ade..9bc51fc 100644
+--- a/xkb/xkbUtils.c
++++ b/xkb/xkbUtils.c
+@@ -1327,6 +1327,7 @@ _XkbCopyNames(XkbDescPtr src, XkbDescPtr dst)
+ }
+ else {
+ free(dst->names->radio_groups);
++ dst->names->radio_groups = NULL;
+ }
+ dst->names->num_rg = src->names->num_rg;
+
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46340.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46340.patch
new file mode 100644
index 0000000000..a6c97485cd
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46340.patch
@@ -0,0 +1,55 @@
+From b320ca0ffe4c0c872eeb3a93d9bde21f765c7c63 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 29 Nov 2022 12:55:45 +1000
+Subject: [PATCH] Xtest: disallow GenericEvents in XTestSwapFakeInput
+
+XTestSwapFakeInput assumes all events in this request are
+sizeof(xEvent) and iterates through these in 32-byte increments.
+However, a GenericEvent may be of arbitrary length longer than 32 bytes,
+so any GenericEvent in this list would result in subsequent events to be
+misparsed.
+
+Additionally, the swapped event is written into a stack-allocated struct
+xEvent (size 32 bytes). For any GenericEvent longer than 32 bytes,
+swapping the event may thus smash the stack like an avocado on toast.
+
+Catch this case early and return BadValue for any GenericEvent.
+Which is what would happen in unswapped setups anyway since XTest
+doesn't support GenericEvent.
+
+CVE-2022-46340, ZDI-CAN 19265
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/b320ca0ffe4c0c872eeb3a93d9bde21f765c7c63]
+CVE: CVE-2022-46340
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xext/xtest.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/Xext/xtest.c b/Xext/xtest.c
+index 38b8012..bf11789 100644
+--- a/Xext/xtest.c
++++ b/Xext/xtest.c
+@@ -501,10 +501,11 @@ XTestSwapFakeInput(ClientPtr client, xReq * req)
+
+ nev = ((req->length << 2) - sizeof(xReq)) / sizeof(xEvent);
+ for (ev = (xEvent *) &req[1]; --nev >= 0; ev++) {
++ int evtype = ev->u.u.type & 0177;
+ /* Swap event */
+- proc = EventSwapVector[ev->u.u.type & 0177];
++ proc = EventSwapVector[evtype];
+ /* no swapping proc; invalid event type? */
+- if (!proc || proc == NotImplemented) {
++ if (!proc || proc == NotImplemented || evtype == GenericEvent) {
+ client->errorValue = ev->u.u.type;
+ return BadValue;
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46341.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46341.patch
new file mode 100644
index 0000000000..0ef6e5fc9f
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46341.patch
@@ -0,0 +1,86 @@
+From 51eb63b0ee1509c6c6b8922b0e4aa037faa6f78b Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 29 Nov 2022 13:55:32 +1000
+Subject: [PATCH] Xi: disallow passive grabs with a detail > 255
+
+The XKB protocol effectively prevents us from ever using keycodes above
+255. For buttons it's theoretically possible but realistically too niche
+to worry about. For all other passive grabs, the detail must be zero
+anyway.
+
+This fixes an OOB write:
+
+ProcXIPassiveUngrabDevice() calls DeletePassiveGrabFromList with a
+temporary grab struct which contains tempGrab->detail.exact = stuff->detail.
+For matching existing grabs, DeleteDetailFromMask is called with the
+stuff->detail value. This function creates a new mask with the one bit
+representing stuff->detail cleared.
+
+However, the array size for the new mask is 8 * sizeof(CARD32) bits,
+thus any detail above 255 results in an OOB array write.
+
+CVE-2022-46341, ZDI-CAN 19381
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/51eb63b0ee1509c6c6b8922b0e4aa037faa6f78b]
+CVE: CVE-2022-46341
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xi/xipassivegrab.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/Xi/xipassivegrab.c b/Xi/xipassivegrab.c
+index d30f51f..89a5910 100644
+--- a/Xi/xipassivegrab.c
++++ b/Xi/xipassivegrab.c
+@@ -133,6 +133,12 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ return BadValue;
+ }
+
++ /* XI2 allows 32-bit keycodes but thanks to XKB we can never
++ * implement this. Just return an error for all keycodes that
++ * cannot work anyway, same for buttons > 255. */
++ if (stuff->detail > 255)
++ return XIAlreadyGrabbed;
++
+ if (XICheckInvalidMaskBits(client, (unsigned char *) &stuff[1],
+ stuff->mask_len * 4) != Success)
+ return BadValue;
+@@ -203,14 +209,8 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ &param, XI2, &mask);
+ break;
+ case XIGrabtypeKeycode:
+- /* XI2 allows 32-bit keycodes but thanks to XKB we can never
+- * implement this. Just return an error for all keycodes that
+- * cannot work anyway */
+- if (stuff->detail > 255)
+- status = XIAlreadyGrabbed;
+- else
+- status = GrabKey(client, dev, mod_dev, stuff->detail,
+- &param, XI2, &mask);
++ status = GrabKey(client, dev, mod_dev, stuff->detail,
++ &param, XI2, &mask);
+ break;
+ case XIGrabtypeEnter:
+ case XIGrabtypeFocusIn:
+@@ -319,6 +319,12 @@ ProcXIPassiveUngrabDevice(ClientPtr client)
+ return BadValue;
+ }
+
++ /* We don't allow passive grabs for details > 255 anyway */
++ if (stuff->detail > 255) {
++ client->errorValue = stuff->detail;
++ return BadValue;
++ }
++
+ rc = dixLookupWindow(&win, stuff->grab_window, client, DixSetAttrAccess);
+ if (rc != Success)
+ return rc;
+--
+2.25.1
+
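
The OOB write described above comes from indexing a fixed 256-bit detail mask, so details above 255 must be rejected before the mask is built. Illustrative sketch only (clear_detail and MASK_WORDS are made-up names, not server code):

    #include <stdint.h>

    #define MASK_WORDS 8    /* 8 CARD32 words -> 256 bits, one per possible detail */

    static void clear_detail(uint32_t mask[MASK_WORDS], unsigned int detail)
    {
        if (detail / 32 >= MASK_WORDS)          /* details above 255 are rejected */
            return;
        mask[detail / 32] &= ~(1u << (detail % 32));
    }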
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46342.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46342.patch
new file mode 100644
index 0000000000..23fef3f321
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46342.patch
@@ -0,0 +1,78 @@
+From b79f32b57cc0c1186b2899bce7cf89f7b325161b Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Wed, 30 Nov 2022 11:20:40 +1000
+Subject: [PATCH] Xext: free the XvRTVideoNotify when turning off from the same
+ client
+
+This fixes a use-after-free bug:
+
+When a client first calls XvdiSelectVideoNotify() on a drawable with a
+TRUE onoff argument, a struct XvVideoNotifyRec is allocated. This struct
+is added twice to the resources:
+ - as the drawable's XvRTVideoNotifyList. This happens only once per
+ drawable, subsequent calls append to this list.
+ - as the client's XvRTVideoNotify. This happens for every client.
+
+The struct keeps the ClientPtr around once it has been added for a
+client. The idea, presumably, is that if the client disconnects we can remove
+all structs from the drawable's list that match the client (by resetting
+the ClientPtr to NULL), but if the drawable is destroyed we can remove
+and free the whole list.
+
+However, if the same client then calls XvdiSelectVideoNotify() on the
+same drawable with a FALSE onoff argument, only the ClientPtr on the
+existing struct was set to NULL. The struct itself remained in the
+client's resources.
+
+If the drawable is now destroyed, the resource system invokes
+XvdiDestroyVideoNotifyList which frees the whole list for this drawable
+- including our struct. This function however does not free the resource
+for the client since our ClientPtr is NULL.
+
+Later, when the client is destroyed and the resource system invokes
+XvdiDestroyVideoNotify, we unconditionally set the ClientPtr to NULL. On
+a struct that has been freed previously. This is generally frowned upon.
+
+Fix this by calling FreeResource() on the second call instead of merely
+setting the ClientPtr to NULL. This removes the struct from the client
+resources (but not from the list), ensuring that it won't be accessed
+again when the client quits.
+
+Note that the assignment tpn->client = NULL; is superfluous since the
+XvdiDestroyVideoNotify function will do this anyway. But it's left for
+clarity and to match a similar invocation in XvdiSelectPortNotify.
+
+CVE-2022-46342, ZDI-CAN 19400
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/b79f32b57cc0c1186b2899bce7cf89f7b325161b]
+CVE: CVE-2022-46342
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xext/xvmain.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/Xext/xvmain.c b/Xext/xvmain.c
+index c520c7d..5f4c174 100644
+--- a/Xext/xvmain.c
++++ b/Xext/xvmain.c
+@@ -811,8 +811,10 @@ XvdiSelectVideoNotify(ClientPtr client, DrawablePtr pDraw, BOOL onoff)
+ tpn = pn;
+ while (tpn) {
+ if (tpn->client == client) {
+- if (!onoff)
++ if (!onoff) {
+ tpn->client = NULL;
++ FreeResource(tpn->id, XvRTVideoNotify);
++ }
+ return Success;
+ }
+ if (!tpn->client)
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46343.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46343.patch
new file mode 100644
index 0000000000..838f7d3726
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46343.patch
@@ -0,0 +1,51 @@
+From 842ca3ccef100ce010d1d8f5f6d6cc1915055900 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 29 Nov 2022 14:53:07 +1000
+Subject: [PATCH] Xext: free the screen saver resource when replacing it
+
+This fixes a use-after-free bug:
+
+When a client first calls ScreenSaverSetAttributes(), a struct
+ScreenSaverAttrRec is allocated and added to the client's
+resources.
+
+When the same client calls ScreenSaverSetAttributes() again, a new
+struct ScreenSaverAttrRec is allocated, replacing the old struct. The
+old struct was freed but not removed from the clients resources.
+
+Later, when the client is destroyed the resource system invokes
+ScreenSaverFreeAttr and attempts to clean up the already freed struct.
+
+Fix this by letting the resource system free the old attrs instead.
+
+CVE-2022-46343, ZDI-CAN 19404
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/842ca3ccef100ce010d1d8f5f6d6cc1915055900]
+CVE: CVE-2022-46343
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xext/saver.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Xext/saver.c b/Xext/saver.c
+index c23907d..05b9ca3 100644
+--- a/Xext/saver.c
++++ b/Xext/saver.c
+@@ -1051,7 +1051,7 @@ ScreenSaverSetAttributes(ClientPtr client)
+ pVlist++;
+ }
+ if (pPriv->attr)
+- FreeScreenAttr(pPriv->attr);
++ FreeResource(pPriv->attr->resource, AttrType);
+ pPriv->attr = pAttr;
+ pAttr->resource = FakeClientID(client->index);
+ if (!AddResource(pAttr->resource, AttrType, (void *) pAttr))
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46344.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46344.patch
new file mode 100644
index 0000000000..e25afa0d16
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2022-46344.patch
@@ -0,0 +1,75 @@
+From 8f454b793e1f13c99872c15f0eed1d7f3b823fe8 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 29 Nov 2022 13:26:57 +1000
+Subject: [PATCH] Xi: avoid integer truncation in length check of
+ ProcXIChangeProperty
+
+This fixes an OOB read and the resulting information disclosure.
+
+Length calculation for the request was clipped to a 32-bit integer. With
+the correct stuff->num_items value the expected request size was
+truncated, passing the REQUEST_FIXED_SIZE check.
+
+The server then proceeded with reading at least stuff->num_items bytes
+(depending on stuff->format) from the request and stuffing whatever it
+finds into the property. In the process it would also allocate at least
+stuff->num_items bytes, i.e. 4GB.
+
+The same bug exists in ProcChangeProperty and ProcXChangeDeviceProperty,
+so let's fix that too.
+
+CVE-2022-46344, ZDI-CAN 19405
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Acked-by: Olivier Fourdan <ofourdan@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/8f454b793e1f13c99872c15f0eed1d7f3b823fe8]
+CVE: CVE-2022-46344
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ Xi/xiproperty.c | 4 ++--
+ dix/property.c | 3 ++-
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/Xi/xiproperty.c b/Xi/xiproperty.c
+index 6ec419e..0cfa6e3 100644
+--- a/Xi/xiproperty.c
++++ b/Xi/xiproperty.c
+@@ -890,7 +890,7 @@ ProcXChangeDeviceProperty(ClientPtr client)
+ REQUEST(xChangeDevicePropertyReq);
+ DeviceIntPtr dev;
+ unsigned long len;
+- int totalSize;
++ uint64_t totalSize;
+ int rc;
+
+ REQUEST_AT_LEAST_SIZE(xChangeDevicePropertyReq);
+@@ -1128,7 +1128,7 @@ ProcXIChangeProperty(ClientPtr client)
+ {
+ int rc;
+ DeviceIntPtr dev;
+- int totalSize;
++ uint64_t totalSize;
+ unsigned long len;
+
+ REQUEST(xXIChangePropertyReq);
+diff --git a/dix/property.c b/dix/property.c
+index ff1d669..6fdb74a 100644
+--- a/dix/property.c
++++ b/dix/property.c
+@@ -205,7 +205,8 @@ ProcChangeProperty(ClientPtr client)
+ WindowPtr pWin;
+ char format, mode;
+ unsigned long len;
+- int sizeInBytes, totalSize, err;
++ int sizeInBytes, err;
++ uint64_t totalSize;
+
+ REQUEST(xChangePropertyReq);
+
+--
+2.25.1
+
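
The truncation being fixed is easy to reproduce in isolation: with an item count chosen so that the byte count exceeds 32 bits, the 32-bit product wraps to a small value and the request-length check passes. Standalone sketch with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t num_items = 0x40000001;              /* attacker-controlled item count */
        uint32_t size_in_bytes = 4;                   /* format 32 -> 4 bytes per item */
        uint32_t truncated = num_items * size_in_bytes;        /* wraps to 4 */
        uint64_t total = (uint64_t) num_items * size_in_bytes; /* 0x100000004 */
        printf("32-bit size = %u, 64-bit size = %llu\n",
               truncated, (unsigned long long) total);
        return 0;
    }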
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-0494.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-0494.patch
new file mode 100644
index 0000000000..ef2ee5d55e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-0494.patch
@@ -0,0 +1,38 @@
+From 0ba6d8c37071131a49790243cdac55392ecf71ec Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Wed, 25 Jan 2023 11:41:40 +1000
+Subject: [PATCH] Xi: fix potential use-after-free in DeepCopyPointerClasses
+
+CVE-2023-0494, ZDI-CAN-19596
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/0ba6d8c37071131a49790243cdac55392ecf71ec]
+CVE: CVE-2023-0494
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/exevents.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index 217baa9561..dcd4efb3bc 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -619,8 +619,10 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ memcpy(to->button->xkb_acts, from->button->xkb_acts,
+ sizeof(XkbAction));
+ }
+- else
++ else {
+ free(to->button->xkb_acts);
++ to->button->xkb_acts = NULL;
++ }
+
+ memcpy(to->button->labels, from->button->labels,
+ from->button->numButtons * sizeof(Atom));
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-1393.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-1393.patch
new file mode 100644
index 0000000000..51d0e0cab6
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-1393.patch
@@ -0,0 +1,46 @@
+From 26ef545b3502f61ca722a7a3373507e88ef64110 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Mon, 13 Mar 2023 11:08:47 +0100
+Subject: [PATCH] composite: Fix use-after-free of the COW
+
+ZDI-CAN-19866/CVE-2023-1393
+
+If a client explicitly destroys the compositor overlay window (aka COW),
+we would leave a dangling pointer to that window in the CompScreen
+structure, which will trigger a use-after-free later.
+
+Make sure to clear the CompScreen pointer to the COW when the latter gets
+destroyed explicitly by the client.
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Reviewed-by: Adam Jackson <ajax@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/26ef545b3502f61ca722a7a3373507e88ef64110]
+CVE: CVE-2023-1393
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ composite/compwindow.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/composite/compwindow.c b/composite/compwindow.c
+index 4e2494b86b..b30da589e9 100644
+--- a/composite/compwindow.c
++++ b/composite/compwindow.c
+@@ -620,6 +620,11 @@ compDestroyWindow(WindowPtr pWin)
+ ret = (*pScreen->DestroyWindow) (pWin);
+ cs->DestroyWindow = pScreen->DestroyWindow;
+ pScreen->DestroyWindow = compDestroyWindow;
++
++ /* Did we just destroy the overlay window? */
++ if (pWin == cs->pOverlayWin)
++ cs->pOverlayWin = NULL;
++
+ /* compCheckTree (pWin->drawable.pScreen); can't check -- tree isn't good*/
+ return ret;
+ }
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch
new file mode 100644
index 0000000000..508588481e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5367.patch
@@ -0,0 +1,84 @@
+From 541ab2ecd41d4d8689e71855d93e492bc554719a Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 3 Oct 2023 11:53:05 +1000
+Subject: [PATCH] Xi/randr: fix handling of PropModeAppend/Prepend
+
+The handling of appending/prepending properties was incorrect, with at
+least two bugs: the property length was set to the length of the new
+part only, i.e. appending or prepending N elements to a property with P
+existing elements always resulted in the property having N elements
+instead of N + P.
+
+Second, when pre-pending a value to a property, the offset for the old
+values was incorrect, leaving the new property with potentially
+uninitialized values and/or resulting in OOB memory writes.
+For example, prepending a 3 element value to a 5 element property would
+result in this 8 value array:
+ [N, N, N, ?, ?, P, P, P ] P, P
+ ^OOB write
+
+The XI2 code is a copy/paste of the RandR code, so the bug exists in
+both.
+
+CVE-2023-5367, ZDI-CAN-22153
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/541ab2ecd41d4d8689e71855d93e492bc554719a]
+CVE: CVE-2023-5367
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xiproperty.c | 4 ++--
+ randr/rrproperty.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Xi/xiproperty.c b/Xi/xiproperty.c
+index 066ba21fba..d315f04d0e 100644
+--- a/Xi/xiproperty.c
++++ b/Xi/xiproperty.c
+@@ -730,7 +730,7 @@ XIChangeDeviceProperty(DeviceIntPtr dev, Atom property, Atom type,
+ XIDestroyDeviceProperty(prop);
+ return BadAlloc;
+ }
+- new_value.size = len;
++ new_value.size = total_len;
+ new_value.type = type;
+ new_value.format = format;
+
+@@ -747,7 +747,7 @@ XIChangeDeviceProperty(DeviceIntPtr dev, Atom property, Atom type,
+ case PropModePrepend:
+ new_data = new_value.data;
+ old_data = (void *) (((char *) new_value.data) +
+- (prop_value->size * size_in_bytes));
++ (len * size_in_bytes));
+ break;
+ }
+ if (new_data)
+diff --git a/randr/rrproperty.c b/randr/rrproperty.c
+index c2fb9585c6..25469f57b2 100644
+--- a/randr/rrproperty.c
++++ b/randr/rrproperty.c
+@@ -209,7 +209,7 @@ RRChangeOutputProperty(RROutputPtr output, Atom property, Atom type,
+ RRDestroyOutputProperty(prop);
+ return BadAlloc;
+ }
+- new_value.size = len;
++ new_value.size = total_len;
+ new_value.type = type;
+ new_value.format = format;
+
+@@ -226,7 +226,7 @@ RRChangeOutputProperty(RROutputPtr output, Atom property, Atom type,
+ case PropModePrepend:
+ new_data = new_value.data;
+ old_data = (void *) (((char *) new_value.data) +
+- (prop_value->size * size_in_bytes));
++ (len * size_in_bytes));
+ break;
+ }
+ if (new_data)
+--
+GitLab
+
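
In short, appending or prepending N items to a property holding P items must leave N + P items, and for prepend the existing P items belong after the N new ones. A minimal sketch of the corrected copy layout (prepend_values is a hypothetical helper, not server code):

    #include <string.h>

    static void prepend_values(long *dst, const long *old_vals, size_t P,
                               const long *new_vals, size_t N)
    {
        memcpy(dst, new_vals, N * sizeof *dst);       /* new items come first */
        memcpy(dst + N, old_vals, P * sizeof *dst);   /* old items shifted by N, not by P */
    }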
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch
new file mode 100644
index 0000000000..720340d83b
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-5380.patch
@@ -0,0 +1,102 @@
+From 564ccf2ce9616620456102727acb8b0256b7bbd7 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 5 Oct 2023 12:19:45 +1000
+Subject: [PATCH] mi: reset the PointerWindows reference on screen switch
+
+PointerWindows[] keeps a reference to the last window our sprite
+entered - changes are usually handled by CheckMotion().
+
+If we switch between screens via XWarpPointer our
+dev->spriteInfo->sprite->win is set to the new screen's root window.
+If there's another window at the cursor location CheckMotion() will
+trigger the right enter/leave events later. If there is not, it skips
+that process and we never trigger LeaveWindow() - PointerWindows[] for
+the device still refers to the previous window.
+
+If that window is destroyed we have a dangling reference that will
+eventually cause a use-after-free bug when checking the window hierarchy
+later.
+
+To trigger this, we require:
+- two protocol screens
+- XWarpPointer to the other screen's root window
+- XDestroyWindow before entering any other window
+
+This is a niche bug so we hack around it by making sure we reset the
+PointerWindows[] entry so we cannot have a dangling pointer. This
+doesn't handle Enter/Leave events correctly but the previous code didn't
+either.
+
+CVE-2023-5380, ZDI-CAN-21608
+
+This vulnerability was discovered by:
+Sri working with Trend Micro Zero Day Initiative
+
+Signed-off-by: Peter Hutterer <peter.hutterer@who-t.net>
+Reviewed-by: Adam Jackson <ajax@redhat.com>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/564ccf2ce9616620456102727acb8b0256b7bbd7]
+CVE: CVE-2023-5380
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.h | 2 --
+ include/eventstr.h | 3 +++
+ mi/mipointer.c | 17 +++++++++++++++--
+ 3 files changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/dix/enterleave.h b/dix/enterleave.h
+index 4b833d8..e8af924 100644
+--- a/dix/enterleave.h
++++ b/dix/enterleave.h
+@@ -58,8 +58,6 @@ extern void DeviceFocusEvent(DeviceIntPtr dev,
+
+ extern void EnterWindow(DeviceIntPtr dev, WindowPtr win, int mode);
+
+-extern void LeaveWindow(DeviceIntPtr dev);
+-
+ extern void CoreFocusEvent(DeviceIntPtr kbd,
+ int type, int mode, int detail, WindowPtr pWin);
+
+diff --git a/include/eventstr.h b/include/eventstr.h
+index bf3b95f..2bae3b0 100644
+--- a/include/eventstr.h
++++ b/include/eventstr.h
+@@ -296,4 +296,7 @@ union _InternalEvent {
+ #endif
+ };
+
++extern void
++LeaveWindow(DeviceIntPtr dev);
++
+ #endif
+diff --git a/mi/mipointer.c b/mi/mipointer.c
+index 75be1ae..b12ae9b 100644
+--- a/mi/mipointer.c
++++ b/mi/mipointer.c
+@@ -397,8 +397,21 @@ miPointerWarpCursor(DeviceIntPtr pDev, ScreenPtr pScreen, int x, int y)
+ #ifdef PANORAMIX
+ && noPanoramiXExtension
+ #endif
+- )
+- UpdateSpriteForScreen(pDev, pScreen);
++ ) {
++ DeviceIntPtr master = GetMaster(pDev, MASTER_POINTER);
++ /* Hack for CVE-2023-5380: if we're moving
++ * screens PointerWindows[] keeps referring to the
++ * old window. If that gets destroyed we have a UAF
++ * bug later. Only happens when jumping from a window
++ * to the root window on the other screen.
++ * Enter/Leave events are incorrect for that case but
++ * too niche to fix.
++ */
++ LeaveWindow(pDev);
++ if (master)
++ LeaveWindow(master);
++ UpdateSpriteForScreen(pDev, pScreen);
++ }
+ }
+
+ /**
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch
new file mode 100644
index 0000000000..0abd5914fa
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6377.patch
@@ -0,0 +1,79 @@
+From 0c1a93d319558fe3ab2d94f51d174b4f93810afd Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Tue, 28 Nov 2023 15:19:04 +1000
+Subject: [PATCH] Xi: allocate enough XkbActions for our buttons
+
+button->xkb_acts is supposed to be an array sufficiently large for all
+our buttons, not just a single XkbActions struct. Allocating
+insufficient memory here means when we memcpy() later in
+XkbSetDeviceInfo we write into memory that wasn't ours to begin with,
+leading to the usual security ooopsiedaisies.
+
+CVE-2023-6377, ZDI-CAN-22412, ZDI-CAN-22413
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/0c1a93d319558fe3ab2d94f51d174b4f93810afd]
+CVE: CVE-2023-6377
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/exevents.c | 12 ++++++------
+ dix/devices.c | 10 ++++++++++
+ 2 files changed, 16 insertions(+), 6 deletions(-)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index dcd4efb3bc..54ea11a938 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -611,13 +611,13 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ }
+
+ if (from->button->xkb_acts) {
+- if (!to->button->xkb_acts) {
+- to->button->xkb_acts = calloc(1, sizeof(XkbAction));
+- if (!to->button->xkb_acts)
+- FatalError("[Xi] not enough memory for xkb_acts.\n");
+- }
++ size_t maxbuttons = max(to->button->numButtons, from->button->numButtons);
++ to->button->xkb_acts = xnfreallocarray(to->button->xkb_acts,
++ maxbuttons,
++ sizeof(XkbAction));
++ memset(to->button->xkb_acts, 0, maxbuttons * sizeof(XkbAction));
+ memcpy(to->button->xkb_acts, from->button->xkb_acts,
+- sizeof(XkbAction));
++ from->button->numButtons * sizeof(XkbAction));
+ }
+ else {
+ free(to->button->xkb_acts);
+diff --git a/dix/devices.c b/dix/devices.c
+index b063128df0..3f3224d626 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -2539,6 +2539,8 @@ RecalculateMasterButtons(DeviceIntPtr slave)
+
+ if (master->button && master->button->numButtons != maxbuttons) {
+ int i;
++ int last_num_buttons = master->button->numButtons;
++
+ DeviceChangedEvent event = {
+ .header = ET_Internal,
+ .type = ET_DeviceChanged,
+@@ -2549,6 +2551,14 @@ RecalculateMasterButtons(DeviceIntPtr slave)
+ };
+
+ master->button->numButtons = maxbuttons;
++ if (last_num_buttons < maxbuttons) {
++ master->button->xkb_acts = xnfreallocarray(master->button->xkb_acts,
++ maxbuttons,
++ sizeof(XkbAction));
++ memset(&master->button->xkb_acts[last_num_buttons],
++ 0,
++ (maxbuttons - last_num_buttons) * sizeof(XkbAction));
++ }
+
+ memcpy(&event.buttons.names, master->button->labels, maxbuttons *
+ sizeof(Atom));
+--
+GitLab
+
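As a minimal, self-contained sketch of the allocation rule the patch above enforces (one action slot per button instead of a single struct), with hypothetical types standing in for the real server ones:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-ins for the real server types. */
    typedef struct { int type; int data; } Action;

    typedef struct {
        int     num_buttons;
        Action *acts;              /* must hold one entry per button */
    } ButtonState;

    /* Copy per-button actions from `src` to `dst`, growing dst->acts so it
     * can hold an action for every button on either device before memcpy()
     * runs; the bug fixed above was allocating room for a single entry. */
    static int copy_button_actions(ButtonState *dst, const ButtonState *src)
    {
        int maxbuttons = dst->num_buttons > src->num_buttons
                             ? dst->num_buttons : src->num_buttons;
        Action *grown = realloc(dst->acts, (size_t)maxbuttons * sizeof(Action));
        if (!grown)
            return -1;
        dst->acts = grown;
        memset(dst->acts, 0, (size_t)maxbuttons * sizeof(Action));
        memcpy(dst->acts, src->acts, (size_t)src->num_buttons * sizeof(Action));
        return 0;
    }

    int main(void)
    {
        Action src_acts[5] = { {1,10}, {1,20}, {1,30}, {1,40}, {1,50} };
        ButtonState src = { 5, src_acts };
        ButtonState dst = { 3, NULL };

        if (copy_button_actions(&dst, &src) == 0)
            printf("copied %d actions\n", src.num_buttons);
        free(dst.acts);
        return 0;
    }
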
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch
new file mode 100644
index 0000000000..6392eae3f8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6478.patch
@@ -0,0 +1,63 @@
+From 14f480010a93ff962fef66a16412fafff81ad632 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 27 Nov 2023 16:27:49 +1000
+Subject: [PATCH] randr: avoid integer truncation in length check of
+ ProcRRChange*Property
+
+Affected are ProcRRChangeProviderProperty and ProcRRChangeOutputProperty.
+See also xserver@8f454b79 where this same bug was fixed for the core
+protocol and XI.
+
+This fixes an OOB read and the resulting information disclosure.
+
+Length calculation for the request was clipped to a 32-bit integer. With
+the correct stuff->nUnits value the expected request size was
+truncated, passing the REQUEST_FIXED_SIZE check.
+
+The server then proceeded with reading at least stuff->num_items bytes
+(depending on stuff->format) from the request and stuffing whatever it
+finds into the property. In the process it would also allocate at least
+stuff->nUnits bytes, i.e. 4GB.
+
+CVE-2023-6478, ZDI-CAN-22561
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/14f480010a93ff962fef66a16412fafff81ad632]
+CVE: CVE-2023-6478
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ randr/rrproperty.c | 2 +-
+ randr/rrproviderproperty.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/randr/rrproperty.c b/randr/rrproperty.c
+index 25469f57b2..c4fef8a1f6 100644
+--- a/randr/rrproperty.c
++++ b/randr/rrproperty.c
+@@ -530,7 +530,7 @@ ProcRRChangeOutputProperty(ClientPtr client)
+ char format, mode;
+ unsigned long len;
+ int sizeInBytes;
+- int totalSize;
++ uint64_t totalSize;
+ int err;
+
+ REQUEST_AT_LEAST_SIZE(xRRChangeOutputPropertyReq);
+diff --git a/randr/rrproviderproperty.c b/randr/rrproviderproperty.c
+index b79c17f9bf..90c5a9a933 100644
+--- a/randr/rrproviderproperty.c
++++ b/randr/rrproviderproperty.c
+@@ -498,7 +498,7 @@ ProcRRChangeProviderProperty(ClientPtr client)
+ char format, mode;
+ unsigned long len;
+ int sizeInBytes;
+- int totalSize;
++ uint64_t totalSize;
+ int err;
+
+ REQUEST_AT_LEAST_SIZE(xRRChangeProviderPropertyReq);
+--
+GitLab
+
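The sketch below (standalone, with hypothetical field names) illustrates the truncation the patch guards against: computing a request size in a 32-bit integer lets a large element count wrap around and slip past a fixed-size check, while doing the multiplication in a 64-bit type keeps the comparison honest.

    #include <stdint.h>
    #include <stdio.h>

    #define REQUEST_HEADER_BYTES 24u   /* hypothetical fixed header size */

    /* Returns 1 if the declared payload fits the bytes the client sent. */
    static int length_check_32(uint32_t num_units, uint32_t unit_bytes,
                               uint32_t bytes_received)
    {
        int total = REQUEST_HEADER_BYTES + num_units * unit_bytes; /* wraps */
        return total == (int)bytes_received;
    }

    static int length_check_64(uint32_t num_units, uint32_t unit_bytes,
                               uint32_t bytes_received)
    {
        uint64_t total = REQUEST_HEADER_BYTES + (uint64_t)num_units * unit_bytes;
        return total == bytes_received;
    }

    int main(void)
    {
        /* 0x40000000 units of 4 bytes overflows a 32-bit total back to the
         * header size, so the 32-bit check wrongly accepts a tiny request. */
        uint32_t units = 0x40000000u, unit_bytes = 4u;
        uint32_t received = REQUEST_HEADER_BYTES;
        printf("32-bit check: %d\n", length_check_32(units, unit_bytes, received));
        printf("64-bit check: %d\n", length_check_64(units, unit_bytes, received));
        return 0;
    }
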
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch
new file mode 100644
index 0000000000..0bfff268e7
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2023-6816.patch
@@ -0,0 +1,55 @@
+From 9e2ecb2af8302dedc49cb6a63ebe063c58a9e7e3 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 14 Dec 2023 11:29:49 +1000
+Subject: [PATCH] dix: allocate enough space for logical button maps
+
+Both DeviceFocusEvent and the XIQueryPointer reply contain a bit for
+each logical button currently down. Since buttons can be arbitrarily mapped
+to anything up to 255 make sure we have enough bits for the maximum mapping.
+
+CVE-2023-6816, ZDI-CAN-22664, ZDI-CAN-22665
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/9e2ecb2af8302dedc49cb6a63ebe063c58a9e7e3]
+CVE: CVE-2023-6816
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xiquerypointer.c | 3 +--
+ dix/enterleave.c | 5 +++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Xi/xiquerypointer.c b/Xi/xiquerypointer.c
+index 5b77b1a444..2b05ac5f39 100644
+--- a/Xi/xiquerypointer.c
++++ b/Xi/xiquerypointer.c
+@@ -149,8 +149,7 @@ ProcXIQueryPointer(ClientPtr client)
+ if (pDev->button) {
+ int i;
+
+- rep.buttons_len =
+- bytes_to_int32(bits_to_bytes(pDev->button->numButtons));
++ rep.buttons_len = bytes_to_int32(bits_to_bytes(256)); /* button map up to 255 */
+ rep.length += rep.buttons_len;
+ buttons = calloc(rep.buttons_len, 4);
+ if (!buttons)
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index 867ec74363..ded8679d76 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -784,8 +784,9 @@ DeviceFocusEvent(DeviceIntPtr dev, int type, int mode, int detail,
+
+ mouse = IsFloating(dev) ? dev : GetMaster(dev, MASTER_POINTER);
+
+- /* XI 2 event */
+- btlen = (mouse->button) ? bits_to_bytes(mouse->button->numButtons) : 0;
++ /* XI 2 event contains the logical button map - maps are CARD8
++ * so we need 256 bits for the possibly maximum mapping */
++ btlen = (mouse->button) ? bits_to_bytes(256) : 0;
+ btlen = bytes_to_int32(btlen);
+ len = sizeof(xXIFocusInEvent) + btlen * 4;
+
+--
+GitLab
+
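A small standalone sketch of the sizing rule in the patch above: because a logical button number is a CARD8, the down-button bitmask must allow for values up to 255 regardless of how many physical buttons the device reports. The helper names below mirror the spirit of the server helpers but are local stand-ins.

    #include <stdio.h>

    /* Round a bit count up to whole bytes, and bytes up to 32-bit words. */
    static unsigned bits_to_bytes(unsigned bits)   { return (bits + 7) / 8; }
    static unsigned bytes_to_words(unsigned bytes) { return (bytes + 3) / 4; }

    int main(void)
    {
        unsigned physical_buttons = 7;      /* what the device advertises */
        unsigned max_logical_button = 256;  /* CARD8 mapping: up to 255 */

        /* Sizing the mask from the physical count under-allocates once a
         * button is remapped to a high logical number. */
        printf("mask words (physical): %u\n",
               bytes_to_words(bits_to_bytes(physical_buttons)));
        /* Sizing from the full logical range is always sufficient. */
        printf("mask words (logical):  %u\n",
               bytes_to_words(bits_to_bytes(max_logical_button)));
        return 0;
    }
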
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch
new file mode 100644
index 0000000000..80ebc64e59
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-1.patch
@@ -0,0 +1,87 @@
+From ece23be888a93b741aa1209d1dbf64636109d6a5 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 18 Dec 2023 14:27:50 +1000
+Subject: [PATCH] dix: Allocate sufficient xEvents for our DeviceStateNotify
+
+If a device has both a button class and a key class and numButtons is
+zero, we can get an OOB write due to event under-allocation.
+
+This function seems to assume a device has either keys or buttons, not
+both. It has two virtually identical code paths, both of which assume
+they're applying to the first event in the sequence.
+
+A device with both a key and button class triggered a logic bug - only
+one xEvent was allocated but the deviceStateNotify pointer was pushed on
+once per type. So effectively this logic code:
+
+ int count = 1;
+ if (button && nbuttons > 32) count++;
+ if (key && nbuttons > 0) count++;
+ if (key && nkeys > 32) count++; // this is basically always true
+ // count is at 2 for our keys + zero button device
+
+ ev = alloc(count * sizeof(xEvent));
+ FixDeviceStateNotify(ev);
+ if (button)
+ FixDeviceStateNotify(ev++);
+ if (key)
+ FixDeviceStateNotify(ev++); // santa drops into the wrong chimney here
+
+If the device has more than 3 valuators, the OOB is pushed back - we're
+off by one so it will happen when the last deviceValuator event is
+written instead.
+
+Fix this by allocating the maximum number of events we may allocate.
+Note that the current behavior is not protocol-correct anyway, this
+patch fixes only the allocation issue.
+
+Note that this issue does not trigger if the device has at least one
+button. While the server does not prevent a button class with zero
+buttons, it is very unlikely.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/ece23be888a93b741aa1209d1dbf64636109d6a5]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index ded8679d76..17964b00a4 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -675,7 +675,8 @@ static void
+ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ {
+ int evcount = 1;
+- deviceStateNotify *ev, *sev;
++ deviceStateNotify sev[6 + (MAX_VALUATORS + 2)/3];
++ deviceStateNotify *ev;
+ deviceKeyStateNotify *kev;
+ deviceButtonStateNotify *bev;
+
+@@ -714,7 +715,7 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ }
+ }
+
+- sev = ev = xallocarray(evcount, sizeof(xEvent));
++ ev = sev;
+ FixDeviceStateNotify(dev, ev, NULL, NULL, NULL, first);
+
+ if (b != NULL) {
+@@ -770,7 +771,6 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+
+ DeliverEventsToWindow(dev, win, (xEvent *) sev, evcount,
+ DeviceStateNotifyMask, NullGrab);
+- free(sev);
+ }
+
+ void
+--
+GitLab
+
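The core of this fix is replacing the heap allocation with a fixed, worst-case sized array. The fragment below restates that sizing in isolation; MAX_VALUATORS is a hypothetical stand-in for the protocol limit, and the sizing expression mirrors the one introduced above.

    #include <stdio.h>

    #define MAX_VALUATORS 36   /* hypothetical stand-in for the protocol limit */

    /* Worst-case number of 32-byte wire events one state notification can
     * need; mirrors the sizing expression used in the patch above. */
    #define MAX_STATE_EVENTS (6 + (MAX_VALUATORS + 2) / 3)

    typedef struct { unsigned char bytes[32]; } wire_event;

    int main(void)
    {
        /* A worst-case sized automatic array removes the under-allocation
         * described in the commit message: however many events the filling
         * code appends, it stays inside this buffer. */
        wire_event events[MAX_STATE_EVENTS];
        printf("room for %d events (%zu bytes)\n",
               MAX_STATE_EVENTS, sizeof(events));
        return 0;
    }
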
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch
new file mode 100644
index 0000000000..65df74376b
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-2.patch
@@ -0,0 +1,221 @@
+From 219c54b8a3337456ce5270ded6a67bcde53553d5 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Mon, 18 Dec 2023 12:26:20 +1000
+Subject: [PATCH] dix: fix DeviceStateNotify event calculation
+
+The previous code only made sense if one considers buttons and keys to
+be mutually exclusive on a device. That is not necessarily true, causing
+a number of issues.
+
+This function allocates and fills in the number of xEvents we need to
+send the device state down the wire. This is split across multiple
+32-byte devices including one deviceStateNotify event and optional
+deviceKeyStateNotify, deviceButtonStateNotify and (possibly multiple)
+deviceValuator events.
+
+The previous behavior would instead compose a sequence
+of [state, buttonstate, state, keystate, valuator...]. This is not
+protocol correct, and on top of that made the code extremely convoluted.
+
+Fix this by streamlining: add both button and key into the deviceStateNotify
+and then append the key state and button state, followed by the
+valuators. Finally, the deviceValuator events contain up to 6 valuators
+per event but we only ever sent through 3 at a time. Let's double that
+throughput.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/219c54b8a3337456ce5270ded6a67bcde53553d5]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/enterleave.c | 121 ++++++++++++++++++++---------------------------
+ 1 file changed, 52 insertions(+), 69 deletions(-)
+
+diff --git a/dix/enterleave.c b/dix/enterleave.c
+index 17964b00a4..7b7ba1098b 100644
+--- a/dix/enterleave.c
++++ b/dix/enterleave.c
+@@ -615,9 +615,15 @@ FixDeviceValuator(DeviceIntPtr dev, deviceValuator * ev, ValuatorClassPtr v,
+
+ ev->type = DeviceValuator;
+ ev->deviceid = dev->id;
+- ev->num_valuators = nval < 3 ? nval : 3;
++ ev->num_valuators = nval < 6 ? nval : 6;
+ ev->first_valuator = first;
+ switch (ev->num_valuators) {
++ case 6:
++ ev->valuator2 = v->axisVal[first + 5];
++ case 5:
++ ev->valuator2 = v->axisVal[first + 4];
++ case 4:
++ ev->valuator2 = v->axisVal[first + 3];
+ case 3:
+ ev->valuator2 = v->axisVal[first + 2];
+ case 2:
+@@ -626,7 +632,6 @@ FixDeviceValuator(DeviceIntPtr dev, deviceValuator * ev, ValuatorClassPtr v,
+ ev->valuator0 = v->axisVal[first];
+ break;
+ }
+- first += ev->num_valuators;
+ }
+
+ static void
+@@ -646,7 +651,7 @@ FixDeviceStateNotify(DeviceIntPtr dev, deviceStateNotify * ev, KeyClassPtr k,
+ ev->num_buttons = b->numButtons;
+ memcpy((char *) ev->buttons, (char *) b->down, 4);
+ }
+- else if (k) {
++ if (k) {
+ ev->classes_reported |= (1 << KeyClass);
+ ev->num_keys = k->xkbInfo->desc->max_key_code -
+ k->xkbInfo->desc->min_key_code;
+@@ -670,15 +675,26 @@ FixDeviceStateNotify(DeviceIntPtr dev, deviceStateNotify * ev, KeyClassPtr k,
+ }
+ }
+
+-
++/**
++ * The device state notify event is split across multiple 32-byte events.
++ * The first one contains the first 32 button state bits, the first 32
++ * key state bits, and the first 3 valuator values.
++ *
++ * If a device has more than that, the server sends out:
++ * - one deviceButtonStateNotify for buttons 32 and above
++ * - one deviceKeyStateNotify for keys 32 and above
++ * - one deviceValuator event per 6 valuators above valuator 4
++ *
++ * All events but the last one have the deviceid binary ORed with MORE_EVENTS,
++ */
+ static void
+ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+ {
++ /* deviceStateNotify, deviceKeyStateNotify, deviceButtonStateNotify
++ * and one deviceValuator for each 6 valuators */
++ deviceStateNotify sev[3 + (MAX_VALUATORS + 6)/6];
+ int evcount = 1;
+- deviceStateNotify sev[6 + (MAX_VALUATORS + 2)/3];
+- deviceStateNotify *ev;
+- deviceKeyStateNotify *kev;
+- deviceButtonStateNotify *bev;
++ deviceStateNotify *ev = sev;
+
+ KeyClassPtr k;
+ ButtonClassPtr b;
+@@ -691,82 +707,49 @@ DeliverStateNotifyEvent(DeviceIntPtr dev, WindowPtr win)
+
+ if ((b = dev->button) != NULL) {
+ nbuttons = b->numButtons;
+- if (nbuttons > 32)
++ if (nbuttons > 32) /* first 32 are encoded in deviceStateNotify */
+ evcount++;
+ }
+ if ((k = dev->key) != NULL) {
+ nkeys = k->xkbInfo->desc->max_key_code - k->xkbInfo->desc->min_key_code;
+- if (nkeys > 32)
++ if (nkeys > 32) /* first 32 are encoded in deviceStateNotify */
+ evcount++;
+- if (nbuttons > 0) {
+- evcount++;
+- }
+ }
+ if ((v = dev->valuator) != NULL) {
+ nval = v->numAxes;
+-
+- if (nval > 3)
+- evcount++;
+- if (nval > 6) {
+- if (!(k && b))
+- evcount++;
+- if (nval > 9)
+- evcount += ((nval - 7) / 3);
+- }
++ /* first three are encoded in deviceStateNotify, then
++ * it's 6 per deviceValuator event */
++ evcount += ((nval - 3) + 6)/6;
+ }
+
+- ev = sev;
+- FixDeviceStateNotify(dev, ev, NULL, NULL, NULL, first);
+-
+- if (b != NULL) {
+- FixDeviceStateNotify(dev, ev++, NULL, b, v, first);
+- first += 3;
+- nval -= 3;
+- if (nbuttons > 32) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- bev = (deviceButtonStateNotify *) ev++;
+- bev->type = DeviceButtonStateNotify;
+- bev->deviceid = dev->id;
+- memcpy((char *) &bev->buttons[4], (char *) &b->down[4],
+- DOWN_LENGTH - 4);
+- }
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ BUG_RETURN(evcount <= ARRAY_SIZE(sev));
++
++ FixDeviceStateNotify(dev, ev, k, b, v, first);
++
++ if (b != NULL && nbuttons > 32) {
++ deviceButtonStateNotify *bev = (deviceButtonStateNotify *) ++ev;
++ (ev - 1)->deviceid |= MORE_EVENTS;
++ bev->type = DeviceButtonStateNotify;
++ bev->deviceid = dev->id;
++ memcpy((char *) &bev->buttons[4], (char *) &b->down[4],
++ DOWN_LENGTH - 4);
+ }
+
+- if (k != NULL) {
+- FixDeviceStateNotify(dev, ev++, k, NULL, v, first);
+- first += 3;
+- nval -= 3;
+- if (nkeys > 32) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- kev = (deviceKeyStateNotify *) ev++;
+- kev->type = DeviceKeyStateNotify;
+- kev->deviceid = dev->id;
+- memmove((char *) &kev->keys[0], (char *) &k->down[4], 28);
+- }
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ if (k != NULL && nkeys > 32) {
++ deviceKeyStateNotify *kev = (deviceKeyStateNotify *) ++ev;
++ (ev - 1)->deviceid |= MORE_EVENTS;
++ kev->type = DeviceKeyStateNotify;
++ kev->deviceid = dev->id;
++ memmove((char *) &kev->keys[0], (char *) &k->down[4], 28);
+ }
+
++ first = 3;
++ nval -= 3;
+ while (nval > 0) {
+- FixDeviceStateNotify(dev, ev++, NULL, NULL, v, first);
+- first += 3;
+- nval -= 3;
+- if (nval > 0) {
+- (ev - 1)->deviceid |= MORE_EVENTS;
+- FixDeviceValuator(dev, (deviceValuator *) ev++, v, first);
+- first += 3;
+- nval -= 3;
+- }
++ ev->deviceid |= MORE_EVENTS;
++ FixDeviceValuator(dev, (deviceValuator *) ++ev, v, first);
++ first += 6;
++ nval -= 6;
+ }
+
+ DeliverEventsToWindow(dev, win, (xEvent *) sev, evcount,
+--
+GitLab
+
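To make the new event accounting easier to follow, here is a standalone, simplified sketch of the wire layout described in the comment above: one base deviceStateNotify (first 32 buttons, first 32 keys, first 3 valuators), optional button and key overflow events, and one deviceValuator event per further 6 valuators. It uses plain ceiling rounding rather than the exact expression in the patch, so treat it as an illustration only.

    #include <stdio.h>

    /* Count the 32-byte wire events needed for one state notification. */
    static int count_state_events(int nbuttons, int nkeys, int nvaluators)
    {
        int count = 1;                       /* deviceStateNotify itself */
        if (nbuttons > 32)
            count++;                         /* deviceButtonStateNotify */
        if (nkeys > 32)
            count++;                         /* deviceKeyStateNotify */
        if (nvaluators > 3)
            count += ((nvaluators - 3) + 5) / 6;  /* 6 valuators per event */
        return count;
    }

    int main(void)
    {
        /* A keyboard-with-buttons device: 0 buttons, 248 keys, 0 valuators. */
        printf("%d events\n", count_state_events(0, 248, 0));   /* 2 */
        /* A pointer with 40 buttons and 8 valuators. */
        printf("%d events\n", count_state_events(40, 8, 8));    /* 3 */
        return 0;
    }
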
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch
new file mode 100644
index 0000000000..742c122fa8
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-3.patch
@@ -0,0 +1,41 @@
+From df3c65706eb169d5938df0052059f3e0d5981b74 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 21 Dec 2023 13:48:10 +1000
+Subject: [PATCH] Xi: when creating a new ButtonClass, set the number of
+ buttons
+
+There's a racy sequence where a master device may copy the button class
+from the slave, without ever initializing numButtons. This leads to a
+device with zero buttons but a button class which is invalid.
+
+Let's copy the numButtons value from the source - by definition if we
+don't have a button class yet we do not have any other slave devices
+with more than this number of buttons anyway.
+
+CVE-2024-0229, ZDI-CAN-22678
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/df3c65706eb169d5938df0052059f3e0d5981b74]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/exevents.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/Xi/exevents.c b/Xi/exevents.c
+index 54ea11a938..e161714682 100644
+--- a/Xi/exevents.c
++++ b/Xi/exevents.c
+@@ -605,6 +605,7 @@ DeepCopyPointerClasses(DeviceIntPtr from, DeviceIntPtr to)
+ to->button = calloc(1, sizeof(ButtonClassRec));
+ if (!to->button)
+ FatalError("[Xi] no memory for class shift.\n");
++ to->button->numButtons = from->button->numButtons;
+ }
+ else
+ classes->button = NULL;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch
new file mode 100644
index 0000000000..d1a6214793
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0229-4.patch
@@ -0,0 +1,45 @@
+From 37539cb0bfe4ed96d4499bf371e6b1a474a740fe Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 21 Dec 2023 14:10:11 +1000
+Subject: [PATCH] Xi: require a pointer and keyboard device for
+ XIAttachToMaster
+
+If we remove a master device and specify which other master devices
+attached slaves should be returned to, enforce that those two are
+indeed a pointer and a keyboard.
+
+Otherwise we can try to attach the keyboards to pointers and vice versa,
+leading to possible crashes later.
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/37539cb0bfe4ed96d4499bf371e6b1a474a740fe]
+CVE: CVE-2024-0229
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xichangehierarchy.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Xi/xichangehierarchy.c b/Xi/xichangehierarchy.c
+index 504defe566..d2d985848d 100644
+--- a/Xi/xichangehierarchy.c
++++ b/Xi/xichangehierarchy.c
+@@ -270,7 +270,7 @@ remove_master(ClientPtr client, xXIRemoveMasterInfo * r, int flags[MAXDEVICES])
+ if (rc != Success)
+ goto unwind;
+
+- if (!IsMaster(newptr)) {
++ if (!IsMaster(newptr) || !IsPointerDevice(newptr)) {
+ client->errorValue = r->return_pointer;
+ rc = BadDevice;
+ goto unwind;
+@@ -281,7 +281,7 @@ remove_master(ClientPtr client, xXIRemoveMasterInfo * r, int flags[MAXDEVICES])
+ if (rc != Success)
+ goto unwind;
+
+- if (!IsMaster(newkeybd)) {
++ if (!IsMaster(newkeybd) || !IsKeyboardDevice(newkeybd)) {
+ client->errorValue = r->return_keyboard;
+ rc = BadDevice;
+ goto unwind;
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch
new file mode 100644
index 0000000000..c8f75d8a7e
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0408.patch
@@ -0,0 +1,64 @@
+From e5e8586a12a3ec915673edffa10dc8fe5e15dac3 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Wed, 6 Dec 2023 12:09:41 +0100
+Subject: [PATCH] glx: Call XACE hooks on the GLX buffer
+
+The XSELINUX code will label resources at creation by checking the
+access mode. When the access mode is DixCreateAccess, it will call the
+function to label the new resource SELinuxLabelResource().
+
+However, GLX buffers do not go through the XACE hooks when created,
+hence leaving the resource actually unlabeled.
+
+When, later, the client tries to create another resource using that
+drawable (like a GC for example), the XSELINUX code would try to use
+the security ID of that object which has never been labeled, get a NULL
+pointer and crash when checking whether the requested permissions are
+granted for subject security ID.
+
+To avoid the issue, make sure to call the XACE hooks when creating the
+GLX buffers.
+
+Credit goes to Donn Seeley <donn@xmission.com> for providing the patch.
+
+CVE-2024-0408
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Acked-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/e5e8586a12a3ec915673edffa10dc8fe5e15dac3]
+CVE: CVE-2024-0408
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ glx/glxcmds.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/glx/glxcmds.c b/glx/glxcmds.c
+index fc26a2e345..1e46d0c723 100644
+--- a/glx/glxcmds.c
++++ b/glx/glxcmds.c
+@@ -48,6 +48,7 @@
+ #include "indirect_util.h"
+ #include "protocol-versions.h"
+ #include "glxvndabi.h"
++#include "xace.h"
+
+ static char GLXServerVendorName[] = "SGI";
+
+@@ -1392,6 +1393,13 @@ DoCreatePbuffer(ClientPtr client, int screenNum, XID fbconfigId,
+ if (!pPixmap)
+ return BadAlloc;
+
++ err = XaceHook(XACE_RESOURCE_ACCESS, client, glxDrawableId, RT_PIXMAP,
++ pPixmap, RT_NONE, NULL, DixCreateAccess);
++ if (err != Success) {
++ (*pGlxScreen->pScreen->DestroyPixmap) (pPixmap);
++ return err;
++ }
++
+ /* Assign the pixmap the same id as the pbuffer and add it as a
+ * resource so it and the DRI2 drawable will be reclaimed when the
+ * pbuffer is destroyed. */
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch
new file mode 100644
index 0000000000..9763e0b562
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-0409.patch
@@ -0,0 +1,46 @@
+From 2ef0f1116c65d5cb06d7b6d83f8a1aea702c94f7 Mon Sep 17 00:00:00 2001
+From: Olivier Fourdan <ofourdan@redhat.com>
+Date: Wed, 6 Dec 2023 11:51:56 +0100
+Subject: [PATCH] ephyr,xwayland: Use the proper private key for cursor
+
+The cursor in DIX is actually split in two parts, the cursor itself and
+the cursor bits, each with their own devPrivates.
+
+The cursor itself includes the cursor bits, meaning that the cursor bits
+devPrivates in within structure of the cursor.
+
+Both Xephyr and Xwayland were using the private key for the cursor bits
+to store the data for the cursor, and when using XSELINUX which comes
+with its own special devPrivates, the data stored in that cursor bits'
+devPrivates would interfere with the XSELINUX devPrivates data and the
+SELINUX security ID would point to some other unrelated data, causing a
+crash in the XSELINUX code when trying to (re)use the security ID.
+
+CVE-2024-0409
+
+Signed-off-by: Olivier Fourdan <ofourdan@redhat.com>
+Reviewed-by: Peter Hutterer <peter.hutterer@who-t.net>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/2ef0f1116c65d5cb06d7b6d83f8a1aea702c94f7]
+CVE: CVE-2024-0409
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ hw/kdrive/ephyr/ephyrcursor.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/kdrive/ephyr/ephyrcursor.c b/hw/kdrive/ephyr/ephyrcursor.c
+index f991899..3f192d0 100644
+--- a/hw/kdrive/ephyr/ephyrcursor.c
++++ b/hw/kdrive/ephyr/ephyrcursor.c
+@@ -246,7 +246,7 @@ miPointerSpriteFuncRec EphyrPointerSpriteFuncs = {
+ Bool
+ ephyrCursorInit(ScreenPtr screen)
+ {
+- if (!dixRegisterPrivateKey(&ephyrCursorPrivateKey, PRIVATE_CURSOR_BITS,
++ if (!dixRegisterPrivateKey(&ephyrCursorPrivateKey, PRIVATE_CURSOR,
+ sizeof(ephyrCursorRec)))
+ return FALSE;
+
+--
+2.25.1
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch
new file mode 100644
index 0000000000..7c8fbcc3ec
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21885.patch
@@ -0,0 +1,113 @@
+From 4a5e9b1895627d40d26045bd0b7ef3dce503cbd1 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Thu, 4 Jan 2024 10:01:24 +1000
+Subject: [PATCH] Xi: flush hierarchy events after adding/removing master
+ devices
+
+The `XISendDeviceHierarchyEvent()` function allocates space to store up
+to `MAXDEVICES` (256) `xXIHierarchyInfo` structures in `info`.
+
+If a device with a given ID was removed and a new device with the same
+ID added both in the same operation, the single device ID will lead to
+two info structures being written to `info`.
+
+Since this case can occur for every device ID at once, a total of two
+times `MAXDEVICES` info structures might be written to the allocation.
+
+To avoid it, once one add/remove master is processed, send out the
+device hierarchy event for the current state and continue. That event
+thus only ever has exactly one of either added/removed in it (and
+optionally slave attached/detached).
+
+CVE-2024-21885, ZDI-CAN-22744
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/4a5e9b1895627d40d26045bd0b7ef3dce503cbd1]
+CVE: CVE-2024-21885
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ Xi/xichangehierarchy.c | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/Xi/xichangehierarchy.c b/Xi/xichangehierarchy.c
+index d2d985848d..72d00451e3 100644
+--- a/Xi/xichangehierarchy.c
++++ b/Xi/xichangehierarchy.c
+@@ -416,6 +416,11 @@ ProcXIChangeHierarchy(ClientPtr client)
+ size_t len; /* length of data remaining in request */
+ int rc = Success;
+ int flags[MAXDEVICES] = { 0 };
++ enum {
++ NO_CHANGE,
++ FLUSH,
++ CHANGED,
++ } changes = NO_CHANGE;
+
+ REQUEST(xXIChangeHierarchyReq);
+ REQUEST_AT_LEAST_SIZE(xXIChangeHierarchyReq);
+@@ -465,8 +470,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = add_master(client, c, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = FLUSH;
+ break;
++ }
+ case XIRemoveMaster:
+ {
+ xXIRemoveMasterInfo *r = (xXIRemoveMasterInfo *) any;
+@@ -475,8 +481,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = remove_master(client, r, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = FLUSH;
+ break;
++ }
+ case XIDetachSlave:
+ {
+ xXIDetachSlaveInfo *c = (xXIDetachSlaveInfo *) any;
+@@ -485,8 +492,9 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = detach_slave(client, c, flags);
+ if (rc != Success)
+ goto unwind;
+- }
++ changes = CHANGED;
+ break;
++ }
+ case XIAttachSlave:
+ {
+ xXIAttachSlaveInfo *c = (xXIAttachSlaveInfo *) any;
+@@ -495,16 +503,25 @@ ProcXIChangeHierarchy(ClientPtr client)
+ rc = attach_slave(client, c, flags);
+ if (rc != Success)
+ goto unwind;
++ changes = CHANGED;
++ break;
+ }
++ default:
+ break;
+ }
+
++ if (changes == FLUSH) {
++ XISendDeviceHierarchyEvent(flags);
++ memset(flags, 0, sizeof(flags));
++ changes = NO_CHANGE;
++ }
++
+ len -= any->length * 4;
+ any = (xXIAnyHierarchyChangeInfo *) ((char *) any + any->length * 4);
+ }
+
+ unwind:
+-
+- XISendDeviceHierarchyEvent(flags);
++ if (changes != NO_CHANGE)
++ XISendDeviceHierarchyEvent(flags);
+ return rc;
+ }
+--
+GitLab
+
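The control-flow change above (flush the hierarchy event after every master add/remove so a single device ID can never carry both an "added" and a "removed" record) follows a common accumulate-then-flush pattern; the sketch below shows it in isolation, with hypothetical types rather than the server's actual code.

    #include <stdio.h>
    #include <string.h>

    #define MAXDEVICES 256

    enum change { NO_CHANGE, FLUSH, CHANGED };

    /* Stand-in for XISendDeviceHierarchyEvent(): consumes pending flags. */
    static void send_hierarchy_event(const int flags[MAXDEVICES])
    {
        int n = 0;
        for (int i = 0; i < MAXDEVICES; i++)
            if (flags[i])
                n++;
        printf("hierarchy event with %d flagged device(s)\n", n);
    }

    /* Process a batch of hierarchy changes; ops > 0 adds a master, ops < 0
     * removes one, 0 attaches/detaches a slave (purely illustrative). */
    static void process_changes(const int *ops, int nops)
    {
        int flags[MAXDEVICES] = { 0 };
        enum change pending = NO_CHANGE;

        for (int i = 0; i < nops; i++) {
            flags[i % MAXDEVICES] |= 1;
            pending = (ops[i] != 0) ? FLUSH : CHANGED;

            /* Adding or removing a master flushes immediately, so one
             * device ID never accumulates two conflicting records. */
            if (pending == FLUSH) {
                send_hierarchy_event(flags);
                memset(flags, 0, sizeof(flags));
                pending = NO_CHANGE;
            }
        }
        if (pending != NO_CHANGE)
            send_hierarchy_event(flags);
    }

    int main(void)
    {
        int ops[] = { +1, -1, 0, 0, +1 };
        process_changes(ops, 5);
        return 0;
    }
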
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch
new file mode 100644
index 0000000000..1e1c782963
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-1.patch
@@ -0,0 +1,74 @@
+From bc1fdbe46559dd947674375946bbef54dd0ce36b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= <jexposit@redhat.com>
+Date: Fri, 22 Dec 2023 18:28:31 +0100
+Subject: [PATCH] Xi: do not keep linked list pointer during recursion
+
+The `DisableDevice()` function is called whenever an enabled device
+is disabled and it moves the device from the `inputInfo.devices` linked
+list to the `inputInfo.off_devices` linked list.
+
+However, its link/unlink operation has an issue during the recursive
+call to `DisableDevice()` due to the `prev` pointer pointing to a
+removed device.
+
+This issue leads to a length mismatch between the total number of
+devices and the number of devices in the list, leading to a heap
+overflow and, possibly, to local privilege escalation.
+
+Simplify the code that checked whether the device passed to
+`DisableDevice()` was in `inputInfo.devices` or not and find the
+previous device after the recursion.
+
+CVE-2024-21886, ZDI-CAN-22840
+
+This vulnerability was discovered by:
+Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/bc1fdbe46559dd947674375946bbef54dd0ce36b]
+CVE: CVE-2024-21886
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/devices.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/dix/devices.c b/dix/devices.c
+index dca98c8d1b..389d28a23c 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -453,14 +453,20 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ {
+ DeviceIntPtr *prev, other;
+ BOOL enabled;
++ BOOL dev_in_devices_list = FALSE;
+ int flags[MAXDEVICES] = { 0 };
+
+ if (!dev->enabled)
+ return TRUE;
+
+- for (prev = &inputInfo.devices;
+- *prev && (*prev != dev); prev = &(*prev)->next);
+- if (*prev != dev)
++ for (other = inputInfo.devices; other; other = other->next) {
++ if (other == dev) {
++ dev_in_devices_list = TRUE;
++ break;
++ }
++ }
++
++ if (!dev_in_devices_list)
+ return FALSE;
+
+ TouchEndPhysicallyActiveTouches(dev);
+@@ -511,6 +517,9 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ LeaveWindow(dev);
+ SetFocusOut(dev);
+
++ for (prev = &inputInfo.devices;
++ *prev && (*prev != dev); prev = &(*prev)->next);
++
+ *prev = dev->next;
+ dev->next = inputInfo.off_devices;
+ inputInfo.off_devices = dev;
+--
+GitLab
+
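As a generic illustration of the pointer-invalidation problem described above (a predecessor link captured before a recursive call that may itself unlink nodes), the sketch below removes a node from a singly linked list by re-finding the predecessor only after any nested removals have run. Types and names are hypothetical, not the server's.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node {
        int          id;
        struct node *next;
    } node;

    /* Unlink `target` from *head. The predecessor is looked up only after
     * any side effects that might have already modified the list, so a
     * stale predecessor pointer can never be dereferenced. */
    static int unlink_node(node **head, node *target)
    {
        node **prev;
        int found = 0;

        for (node *n = *head; n; n = n->next)
            if (n == target) { found = 1; break; }
        if (!found)
            return 0;

        /* ... imagine recursive work here that may unlink other nodes ... */

        for (prev = head; *prev && *prev != target; prev = &(*prev)->next)
            ;
        *prev = target->next;
        target->next = NULL;
        return 1;
    }

    int main(void)
    {
        node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        node *head = &a;
        unlink_node(&head, &b);
        for (node *n = head; n; n = n->next)
            printf("%d ", n->id);   /* 1 3 */
        printf("\n");
        return 0;
    }
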
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch
new file mode 100644
index 0000000000..af607df4f0
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-21886-2.patch
@@ -0,0 +1,57 @@
+From 26769aa71fcbe0a8403b7fb13b7c9010cc07c3a8 Mon Sep 17 00:00:00 2001
+From: Peter Hutterer <peter.hutterer@who-t.net>
+Date: Fri, 5 Jan 2024 09:40:27 +1000
+Subject: [PATCH] dix: when disabling a master, float disabled slaved devices
+ too
+
+Disabling a master device floats all slave devices but we didn't do this
+to already-disabled slave devices. As a result those devices kept their
+reference to the master device resulting in access to already freed
+memory if the master device was removed before the corresponding slave
+device.
+
+And to match this behavior, also forcibly reset that pointer during
+CloseDownDevices().
+
+Related to CVE-2024-21886, ZDI-CAN-22840
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/26769aa71fcbe0a8403b7fb13b7c9010cc07c3a8]
+CVE: CVE-2024-21886
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ dix/devices.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/dix/devices.c b/dix/devices.c
+index 389d28a23c..84a6406d13 100644
+--- a/dix/devices.c
++++ b/dix/devices.c
+@@ -483,6 +483,13 @@ DisableDevice(DeviceIntPtr dev, BOOL sendevent)
+ flags[other->id] |= XISlaveDetached;
+ }
+ }
++
++ for (other = inputInfo.off_devices; other; other = other->next) {
++ if (!IsMaster(other) && GetMaster(other, MASTER_ATTACHED) == dev) {
++ AttachDevice(NULL, other, NULL);
++ flags[other->id] |= XISlaveDetached;
++ }
++ }
+ }
+ else {
+ for (other = inputInfo.devices; other; other = other->next) {
+@@ -1088,6 +1095,11 @@ CloseDownDevices(void)
+ dev->master = NULL;
+ }
+
++ for (dev = inputInfo.off_devices; dev; dev = dev->next) {
++ if (!IsMaster(dev) && !IsFloating(dev))
++ dev->master = NULL;
++ }
++
+ CloseDeviceList(&inputInfo.devices);
+ CloseDeviceList(&inputInfo.off_devices);
+
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch
new file mode 100644
index 0000000000..da735efb2b
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31080.patch
@@ -0,0 +1,49 @@
+From 96798fc1967491c80a4d0c8d9e0a80586cb2152b Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Fri, 22 Mar 2024 18:51:45 -0700
+Subject: [PATCH] Xi: ProcXIGetSelectedEvents needs to use unswapped length to
+ send reply
+
+CVE-2024-31080
+
+Reported-by: https://debbugs.gnu.org/cgi/bugreport.cgi?bug=69762
+Fixes: 53e821ab4 ("Xi: add request processing for XIGetSelectedEvents.")
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+Part-of: <https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/1463>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/96798fc1967491c80a4d0c8d9e0a80586cb2152b]
+CVE: CVE-2024-31080
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ Xi/xiselectev.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/Xi/xiselectev.c b/Xi/xiselectev.c
+index edcb8a0d36..ac14949871 100644
+--- a/Xi/xiselectev.c
++++ b/Xi/xiselectev.c
+@@ -349,6 +349,7 @@ ProcXIGetSelectedEvents(ClientPtr client)
+ InputClientsPtr others = NULL;
+ xXIEventMask *evmask = NULL;
+ DeviceIntPtr dev;
++ uint32_t length;
+
+ REQUEST(xXIGetSelectedEventsReq);
+ REQUEST_SIZE_MATCH(xXIGetSelectedEventsReq);
+@@ -418,10 +419,12 @@ ProcXIGetSelectedEvents(ClientPtr client)
+ }
+ }
+
++ /* save the value before SRepXIGetSelectedEvents swaps it */
++ length = reply.length;
+ WriteReplyToClient(client, sizeof(xXIGetSelectedEventsReply), &reply);
+
+ if (reply.num_masks)
+- WriteToClient(client, reply.length * 4, buffer);
++ WriteToClient(client, length * 4, buffer);
+
+ free(buffer);
+ return Success;
+--
+GitLab
+
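Both this fix and the CVE-2024-31081 fix that follows apply the same rule: copy any reply field you still need before handing the reply to the writing helper, because for byte-swapped clients that helper swaps the structure in place. A standalone sketch with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint32_t length;       /* reply length in 4-byte units */
        uint16_t num_items;
    } reply_t;

    static uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0xff00u) |
               ((v << 8) & 0xff0000u) | (v << 24);
    }

    /* Stand-in for WriteReplyToClient(): for a byte-swapped client this
     * swaps the reply fields in place before writing them out. */
    static void write_reply(reply_t *rep, int client_swapped)
    {
        if (client_swapped)
            rep->length = bswap32(rep->length);
        /* ... the bytes of *rep would be written to the socket here ... */
    }

    int main(void)
    {
        reply_t rep = { .length = 6, .num_items = 2 };

        /* Save the host-order value first; after write_reply() the field
         * may hold the swapped representation and must not drive sizing. */
        uint32_t length = rep.length;
        write_reply(&rep, /* client_swapped = */ 1);

        printf("trailing payload bytes: %u\n", (unsigned)(length * 4));
        printf("if read after swap:     %u\n", (unsigned)(rep.length * 4));
        return 0;
    }
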
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch
new file mode 100644
index 0000000000..d2c551a0e5
--- /dev/null
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg/CVE-2024-31081.patch
@@ -0,0 +1,47 @@
+From 3e77295f888c67fc7645db5d0c00926a29ffecee Mon Sep 17 00:00:00 2001
+From: Alan Coopersmith <alan.coopersmith@oracle.com>
+Date: Fri, 22 Mar 2024 18:56:27 -0700
+Subject: [PATCH] Xi: ProcXIPassiveGrabDevice needs to use unswapped length to
+ send reply
+
+CVE-2024-31081
+
+Fixes: d220d6907 ("Xi: add GrabButton and GrabKeysym code.")
+Signed-off-by: Alan Coopersmith <alan.coopersmith@oracle.com>
+Part-of: <https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/1463>
+
+Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/xserver/-/commit/3e77295f888c67fc7645db5d0c00926a29ffecee]
+CVE: CVE-2024-31081
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ Xi/xipassivegrab.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/Xi/xipassivegrab.c b/Xi/xipassivegrab.c
+index c9ac2f8553..896233bec2 100644
+--- a/Xi/xipassivegrab.c
++++ b/Xi/xipassivegrab.c
+@@ -93,6 +93,7 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ GrabParameters param;
+ void *tmp;
+ int mask_len;
++ uint32_t length;
+
+ REQUEST(xXIPassiveGrabDeviceReq);
+ REQUEST_FIXED_SIZE(xXIPassiveGrabDeviceReq,
+@@ -247,9 +248,11 @@ ProcXIPassiveGrabDevice(ClientPtr client)
+ }
+ }
+
++ /* save the value before SRepXIPassiveGrabDevice swaps it */
++ length = rep.length;
+ WriteReplyToClient(client, sizeof(rep), &rep);
+ if (rep.num_modifiers)
+- WriteToClient(client, rep.length * 4, modifiers_failed);
++ WriteToClient(client, length * 4, modifiers_failed);
+
+ out:
+ free(modifiers_failed);
+--
+GitLab
+
diff --git a/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb b/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb
index d176f390a4..04a6e734ef 100644
--- a/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb
+++ b/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.14.bb
@@ -5,7 +5,34 @@ SRC_URI += "file://0001-xf86pciBus.c-use-Intel-ddx-only-for-pre-gen4-hardwar.pat
file://0001-test-xtest-Initialize-array-with-braces.patch \
file://sdksyms-no-build-path.patch \
file://0001-drmmode_display.c-add-missing-mi.h-include.patch \
- "
+ file://CVE-2022-3550.patch \
+ file://CVE-2022-3551.patch \
+ file://CVE-2022-3553.patch \
+ file://CVE-2022-4283.patch \
+ file://CVE-2022-46340.patch \
+ file://CVE-2022-46341.patch \
+ file://CVE-2022-46342.patch \
+ file://CVE-2022-46343.patch \
+ file://CVE-2022-46344.patch \
+ file://CVE-2023-0494.patch \
+ file://CVE-2023-1393.patch \
+ file://CVE-2023-5367.patch \
+ file://CVE-2023-5380.patch \
+ file://CVE-2023-6377.patch \
+ file://CVE-2023-6478.patch \
+ file://CVE-2023-6816.patch \
+ file://CVE-2024-0229-1.patch \
+ file://CVE-2024-0229-2.patch \
+ file://CVE-2024-0229-3.patch \
+ file://CVE-2024-0229-4.patch \
+ file://CVE-2024-21885.patch \
+ file://CVE-2024-21886-1.patch \
+ file://CVE-2024-21886-2.patch \
+ file://CVE-2024-0408.patch \
+ file://CVE-2024-0409.patch \
+ file://CVE-2024-31081.patch \
+ file://CVE-2024-31080.patch \
+"
SRC_URI[md5sum] = "453fc86aac8c629b3a5b77e8dcca30bf"
SRC_URI[sha256sum] = "54b199c9280ff8bf0f73a54a759645bd0eeeda7255d1c99310d5b7595f3ac066"
diff --git a/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb b/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb
index e4f7d1e372..d7c7918515 100644
--- a/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb
+++ b/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb
@@ -11,6 +11,7 @@ SRC_URI += " \
file://0001-Disable-installing-header-file-provided-by-another-p.patch \
file://0001-Fix-build-for-Linux-5.8-rc1.patch \
file://0001-Fix-build-for-Linux-5.9-rc1.patch \
+file://fix-build-for-Linux-5.11-rc1.patch \
"
EXTRA_OEMAKE='KERNEL_DIR="${STAGING_KERNEL_DIR}" PREFIX="${D}"'
diff --git a/meta/recipes-kernel/cryptodev/files/fix-build-for-Linux-5.11-rc1.patch b/meta/recipes-kernel/cryptodev/files/fix-build-for-Linux-5.11-rc1.patch
new file mode 100644
index 0000000000..3ae77cb9d6
--- /dev/null
+++ b/meta/recipes-kernel/cryptodev/files/fix-build-for-Linux-5.11-rc1.patch
@@ -0,0 +1,32 @@
+From 55c6315058fc0dd189ffd116f2cc27ba4fa84cb6 Mon Sep 17 00:00:00 2001
+From: Joan Bruguera <joanbrugueram@gmail.com>
+Date: Mon, 28 Dec 2020 01:41:31 +0100
+Subject: [PATCH] Fix build for Linux 5.11-rc1
+
+ksys_close was removed, as far as I can tell, close_fd replaces it.
+
+See also: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8760c909f54a82aaa6e76da19afe798a0c77c3c3
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=1572bfdf21d4d50e51941498ffe0b56c2289f783
+
+Upstream-Status: Backport [https://github.com/cryptodev-linux/cryptodev-linux/commit/55c6315058fc0dd189ffd116f2cc27ba4fa84cb6]
+Signed-off-by: Anuj Mittal <anuj.mittal@intel.com>
+---
+ ioctl.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/ioctl.c b/ioctl.c
+index 3d332380..95481d4f 100644
+--- a/ioctl.c
++++ b/ioctl.c
+@@ -871,8 +871,10 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
+ if (unlikely(ret)) {
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
+ sys_close(fd);
+-#else
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
+ ksys_close(fd);
++#else
++ close_fd(fd);
+ #endif
+ return ret;
+ }
diff --git a/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb b/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb
index 871b36440f..206c6ccae7 100644
--- a/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb
+++ b/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb
@@ -30,6 +30,9 @@ inherit autotools update-rc.d systemd
export LDFLAGS = "-L${STAGING_LIBDIR}"
EXTRA_OECONF = " --with-zlib=yes"
+# affects kexec-tools shipped by Fedora versions prior to 2.0.21-8 and RHEL versions prior to 2.0.20-47.
+CVE_CHECK_WHITELIST += "CVE-2021-20269"
+
do_compile_prepend() {
# Remove the prepackaged config.h from the source tree as it overrides
# the same file generated by configure and placed in the build tree
diff --git a/meta/recipes-kernel/kmod/kmod/ptest.patch b/meta/recipes-kernel/kmod/kmod/ptest.patch
deleted file mode 100644
index 831dbcb909..0000000000
--- a/meta/recipes-kernel/kmod/kmod/ptest.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Add 'install-ptest' rule.
-
-Signed-off-by: Tudor Florea <tudor.florea@enea.com>
-Upstream-Status: Pending
-
-diff -ruN a/Makefile.am b/Makefile.am
---- a/Makefile.am 2013-07-12 17:11:05.278331557 +0200
-+++ b/Makefile.am 2013-07-12 17:14:27.033788016 +0200
-@@ -204,6 +204,16 @@
-
- distclean-local: $(DISTCLEAN_LOCAL_HOOKS)
-
-+install-ptest:
-+ @$(MKDIR_P) $(DESTDIR)/testsuite
-+ @for file in $(TESTSUITE); do \
-+ install $$file $(DESTDIR)/testsuite; \
-+ done;
-+ @sed -e 's/^Makefile/_Makefile/' < Makefile > $(DESTDIR)/Makefile
-+ @$(MKDIR_P) $(DESTDIR)/tools
-+ @cp $(noinst_SCRIPTS) $(noinst_PROGRAMS) $(DESTDIR)/tools
-+ @cp -r testsuite/rootfs testsuite/.libs $(DESTDIR)/testsuite
-+
- # ------------------------------------------------------------------------------
- # custom release helpers
- # ------------------------------------------------------------------------------
diff --git a/meta/recipes-kernel/linux-firmware/linux-firmware_20220411.bb b/meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb
index ad8dbac17f..873ba9cdf0 100644
--- a/meta/recipes-kernel/linux-firmware/linux-firmware_20220411.bb
+++ b/meta/recipes-kernel/linux-firmware/linux-firmware_20240220.bb
@@ -27,7 +27,6 @@ LICENSE = "\
& Firmware-go7007 \
& Firmware-GPLv2 \
& Firmware-hfi1_firmware \
- & Firmware-i2400m \
& Firmware-i915 \
& Firmware-ibt_firmware \
& Firmware-ice \
@@ -46,6 +45,7 @@ LICENSE = "\
& Firmware-phanfw \
& Firmware-qat \
& Firmware-qcom \
+ & Firmware-qcom-yamato \
& Firmware-qla1280 \
& Firmware-qla2xxx \
& Firmware-qualcommAthos_ar3k \
@@ -57,7 +57,6 @@ LICENSE = "\
& Firmware-rtlwifi_firmware \
& Firmware-imx-sdma_firmware \
& Firmware-siano \
- & Firmware-tda7706-firmware \
& Firmware-ti-connectivity \
& Firmware-ti-keystone \
& Firmware-ueagle-atm4-firmware \
@@ -72,8 +71,8 @@ LICENSE = "\
LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.adsp_sst;md5=615c45b91a5a4a9fe046d6ab9a2df728 \
file://LICENCE.agere;md5=af0133de6b4a9b2522defd5f188afd31 \
- file://LICENSE.amdgpu;md5=44c1166d052226cb2d6c8d7400090203 \
- file://LICENSE.amd-ucode;md5=3c5399dc9148d7f0e1f41e34b69cf14f \
+ file://LICENSE.amdgpu;md5=a2589a05ea5b6bd2b7f4f623c7e7a649 \
+ file://LICENSE.amd-ucode;md5=6ca90c57f7b248de1e25c7f68ffc4698 \
file://LICENSE.amlogic_vdec;md5=dc44f59bf64a81643e500ad3f39a468a \
file://LICENCE.atheros_firmware;md5=30a14c7823beedac9fa39c64fdd01a13 \
file://LICENSE.atmel;md5=aa74ac0c60595dee4d4e239107ea77a3 \
@@ -91,7 +90,6 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.go7007;md5=c0bb9f6aaaba55b0529ee9b30aa66beb \
file://GPL-2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
file://LICENSE.hfi1_firmware;md5=5e7b6e586ce7339d12689e49931ad444 \
- file://LICENCE.i2400m;md5=14b901969e23c41881327c0d9e4b7d36 \
file://LICENSE.i915;md5=2b0b2e0d20984affd4490ba2cba02570 \
file://LICENCE.ibt_firmware;md5=fdbee1ddfe0fb7ab0b2fcd6b454a366b \
file://LICENSE.ice;md5=742ab4850f2670792940e6d15c974b2f \
@@ -110,8 +108,9 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.OLPC;md5=5b917f9d8c061991be4f6f5f108719cd \
file://LICENCE.open-ath9k-htc-firmware;md5=1b33c9f4d17bc4d457bdb23727046837 \
file://LICENCE.phanfw;md5=954dcec0e051f9409812b561ea743bfa \
- file://LICENCE.qat_firmware;md5=9e7d8bea77612d7cc7d9e9b54b623062 \
+ file://LICENCE.qat_firmware;md5=72de83dfd9b87be7685ed099a39fbea4 \
file://LICENSE.qcom;md5=164e3362a538eb11d3ac51e8e134294b \
+ file://LICENSE.qcom_yamato;md5=d0de0eeccaf1843a850bf7a6777eec5c \
file://LICENCE.qla1280;md5=d6895732e622d950609093223a2c4f5d \
file://LICENCE.qla2xxx;md5=505855e921b75f1be4a437ad9b79dff0 \
file://LICENSE.QualcommAtheros_ar3k;md5=b5fe244fb2b532311de1472a3bc06da5 \
@@ -123,7 +122,6 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.rtlwifi_firmware.txt;md5=00d06cfd3eddd5a2698948ead2ad54a5 \
file://LICENSE.sdma_firmware;md5=51e8c19ecc2270f4b8ea30341ad63ce9 \
file://LICENCE.siano;md5=4556c1bf830067f12ca151ad953ec2a5 \
- file://LICENCE.tda7706-firmware.txt;md5=835997cf5e3c131d0dddd695c7d9103e \
file://LICENCE.ti-connectivity;md5=c5e02be633f1499c109d1652514d85ec \
file://LICENCE.ti-keystone;md5=3a86335d32864b0bef996bee26cc0f2c \
file://LICENCE.ueagle-atm4-firmware;md5=4ed7ea6b507ccc583b9d594417714118 \
@@ -132,8 +130,11 @@ LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \
file://LICENCE.xc4000;md5=0ff51d2dc49fce04814c9155081092f0 \
file://LICENCE.xc5000;md5=1e170c13175323c32c7f4d0998d53f66 \
file://LICENCE.xc5000c;md5=12b02efa3049db65d524aeb418dd87ca \
- file://WHENCE;md5=4cf67d71a21887c682c3989a4318745e \
+ file://WHENCE;md5=${WHENCE_CHKSUM} \
"
+# WHENCE checksum is defined separately to ease overriding it if
+# class-devupstream is selected.
+WHENCE_CHKSUM = "a344e6c28970fc7daafa81c10247aeb6"
# These are not common licenses, set NO_GENERIC_LICENSE for them
# so that the license files will be copied from fetched source
@@ -159,7 +160,6 @@ NO_GENERIC_LICENSE[Firmware-fw_sst_0f28] = "LICENCE.fw_sst_0f28"
NO_GENERIC_LICENSE[Firmware-go7007] = "LICENCE.go7007"
NO_GENERIC_LICENSE[Firmware-GPLv2] = "GPL-2"
NO_GENERIC_LICENSE[Firmware-hfi1_firmware] = "LICENSE.hfi1_firmware"
-NO_GENERIC_LICENSE[Firmware-i2400m] = "LICENCE.i2400m"
NO_GENERIC_LICENSE[Firmware-i915] = "LICENSE.i915"
NO_GENERIC_LICENSE[Firmware-ibt_firmware] = "LICENCE.ibt_firmware"
NO_GENERIC_LICENSE[Firmware-ice] = "LICENSE.ice"
@@ -179,6 +179,7 @@ NO_GENERIC_LICENSE[Firmware-ath9k-htc] = "LICENCE.open-ath9k-htc-firmware"
NO_GENERIC_LICENSE[Firmware-phanfw] = "LICENCE.phanfw"
NO_GENERIC_LICENSE[Firmware-qat] = "LICENCE.qat_firmware"
NO_GENERIC_LICENSE[Firmware-qcom] = "LICENSE.qcom"
+NO_GENERIC_LICENSE[Firmware-qcom-yamato] = "LICENSE.qcom_yamato"
NO_GENERIC_LICENSE[Firmware-qla1280] = "LICENCE.qla1280"
NO_GENERIC_LICENSE[Firmware-qla2xxx] = "LICENCE.qla2xxx"
NO_GENERIC_LICENSE[Firmware-qualcommAthos_ar3k] = "LICENSE.QualcommAtheros_ar3k"
@@ -190,7 +191,6 @@ NO_GENERIC_LICENSE[Firmware-ralink-firmware] = "LICENCE.ralink-firmware.txt"
NO_GENERIC_LICENSE[Firmware-rtlwifi_firmware] = "LICENCE.rtlwifi_firmware.txt"
NO_GENERIC_LICENSE[Firmware-siano] = "LICENCE.siano"
NO_GENERIC_LICENSE[Firmware-imx-sdma_firmware] = "LICENSE.sdma_firmware"
-NO_GENERIC_LICENSE[Firmware-tda7706-firmware] = "LICENCE.tda7706-firmware.txt"
NO_GENERIC_LICENSE[Firmware-ti-connectivity] = "LICENCE.ti-connectivity"
NO_GENERIC_LICENSE[Firmware-ti-keystone] = "LICENCE.ti-keystone"
NO_GENERIC_LICENSE[Firmware-ueagle-atm4-firmware] = "LICENCE.ueagle-atm4-firmware"
@@ -203,9 +203,16 @@ NO_GENERIC_LICENSE[WHENCE] = "WHENCE"
PE = "1"
-SRC_URI = "${KERNELORG_MIRROR}/linux/kernel/firmware/${BPN}-${PV}.tar.xz"
+SRC_URI = "\
+ ${KERNELORG_MIRROR}/linux/kernel/firmware/${BPN}-${PV}.tar.xz \
+"
+
+BBCLASSEXTEND = "devupstream:target"
+SRC_URI:class-devupstream = "git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git;protocol=https;branch=main"
+# Pin this to the 20220509 release, override this in local.conf
+SRCREV:class-devupstream ?= "b19cbdca78ab2adfd210c91be15a22568e8b8cae"
-SRC_URI[sha256sum] = "020b11f6412f4956f5a6f98de7d41867d2b30ea0ce81b1e2d206ec9840363849"
+SRC_URI[sha256sum] = "bf0f239dc0801e9d6bf5d5fb3e2f549575632cf4688f4348184199cb02c2bcd7"
inherit allarch
@@ -216,7 +223,8 @@ do_compile() {
}
do_install() {
- oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install
+ # install-nodedup avoids rdfind dependency
+ oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install-nodedup
cp GPL-2 LICEN[CS]E.* WHENCE ${D}${nonarch_base_libdir}/firmware/
}
@@ -232,6 +240,7 @@ PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
${PN}-rs9113 ${PN}-rs9116 \
${PN}-rtl-license ${PN}-rtl8188 ${PN}-rtl8192cu ${PN}-rtl8192ce ${PN}-rtl8192su ${PN}-rtl8723 ${PN}-rtl8821 \
${PN}-rtl8168 \
+ ${PN}-rtl8822 \
${PN}-cypress-license \
${PN}-broadcom-license \
${PN}-bcm-0bb4-0306 \
@@ -301,13 +310,20 @@ PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \
${PN}-nvidia-gpu \
${PN}-netronome-license ${PN}-netronome \
${PN}-qat ${PN}-qat-license \
- ${PN}-qcom-license \
+ ${PN}-qcom-license ${PN}-qcom-yamato-license \
${PN}-qcom-venus-1.8 ${PN}-qcom-venus-4.2 ${PN}-qcom-venus-5.2 ${PN}-qcom-venus-5.4 \
${PN}-qcom-vpu-1.0 ${PN}-qcom-vpu-2.0 \
- ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a530 \
+ ${PN}-qcom-adreno-a2xx ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a4xx ${PN}-qcom-adreno-a530 \
${PN}-qcom-adreno-a630 ${PN}-qcom-adreno-a650 ${PN}-qcom-adreno-a660 \
- ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \
- ${PN}-qcom-sm8250-audio ${PN}-qcom-sm8250-compute \
+ ${PN}-qcom-apq8016-modem ${PN}-qcom-apq8016-wifi \
+ ${PN}-qcom-apq8096-adreno ${PN}-qcom-apq8096-audio ${PN}-qcom-apq8096-modem \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-compat \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-audio \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-adreno \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-compute \
+ ${PN}-qcom-sc8280xp-lenovo-x13s-sensors \
+ ${PN}-qcom-sdm845-adreno ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \
+ ${PN}-qcom-sm8250-adreno ${PN}-qcom-sm8250-audio ${PN}-qcom-sm8250-compute \
${PN}-amlogic-vdec-license ${PN}-amlogic-vdec \
${PN}-lt9611uxc ${PN}-lontium-license \
${PN}-whence-license \
@@ -398,7 +414,7 @@ LICENSE_${PN}-mt7601u-license = "Firmware-ralink_a_mediatek_company_firmware"
FILES_${PN}-mt7601u-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware"
FILES_${PN}-mt7601u = " \
- ${nonarch_base_libdir}/firmware/mt7601u.bin \
+ ${nonarch_base_libdir}/firmware/mediatek/mt7601u.bin \
"
RDEPENDS_${PN}-mt7601u += "${PN}-mt7601u-license"
@@ -550,6 +566,7 @@ LICENSE_${PN}-rtl8192ce = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl8192su = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl8723 = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl8821 = "Firmware-rtlwifi_firmware"
+LICENSE_${PN}-rtl8822 = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl-license = "Firmware-rtlwifi_firmware"
LICENSE_${PN}-rtl8168 = "WHENCE"
@@ -577,6 +594,11 @@ FILES_${PN}-rtl8821 = " \
FILES_${PN}-rtl8168 = " \
${nonarch_base_libdir}/firmware/rtl_nic/rtl8168*.fw \
"
+FILES_${PN}-rtl8822 = " \
+ ${nonarch_base_libdir}/firmware/rtl_bt/rtl8822*.bin \
+ ${nonarch_base_libdir}/firmware/rtw88/rtw8822*.bin \
+ ${nonarch_base_libdir}/firmware/rtlwifi/rtl8822*.bin \
+"
RDEPENDS_${PN}-rtl8188 += "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8192ce += "${PN}-rtl-license"
@@ -584,6 +606,7 @@ RDEPENDS_${PN}-rtl8192cu += "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8192su = "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8723 += "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8821 += "${PN}-rtl-license"
+RDEPENDS_${PN}-rtl8822 += "${PN}-rtl-license"
RDEPENDS_${PN}-rtl8168 += "${PN}-whence-license"
# For ti-connectivity
@@ -951,21 +974,67 @@ RDEPENDS_${PN}-qat = "${PN}-qat-license"
# For QCOM VPU/GPU and SDM845
LICENSE_${PN}-qcom-license = "Firmware-qcom"
+LICENSE_${PN}-qcom-yamato-license = "Firmware-qcom-yamato"
+LICENSE_${PN}-qcom-venus-1.8 = "Firmware-qcom"
+LICENSE_${PN}-qcom-venus-4.2 = "Firmware-qcom"
+LICENSE_${PN}-qcom-venus-5.2 = "Firmware-qcom"
+LICENSE_${PN}-qcom-venus-5.4 = "Firmware-qcom"
+LICENSE_${PN}-qcom-vpu-1.0 = "Firmware-qcom"
+LICENSE_${PN}-qcom-vpu-2.0 = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a2xx = "Firmware-qcom Firmware-qcom-yamato"
+LICENSE_${PN}-qcom-adreno-a3xx = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a4xx = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a530 = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a630 = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a650 = "Firmware-qcom"
+LICENSE_${PN}-qcom-adreno-a660 = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8016-modem = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8016-wifi = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8096-audio = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8096-adreno = "Firmware-qcom"
+LICENSE_${PN}-qcom-apq8096-modem = "Firmware-qcom"
+LICENSE_${PN}-qcom-sc8280xp-lenovo-x13s-audio = "Firmware-qcom"
+LICENSE_${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "Firmware-qcom"
+LICENSE_${PN}-qcom-sc8280xp-lenovo-x13s-compute = "Firmware-qcom"
+LICENSE_${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "Firmware-qcom"
+LICENSE_${PN}-qcom-sdm845-audio = "Firmware-qcom"
+LICENSE_${PN}-qcom-sdm845-adreno = "Firmware-qcom"
+LICENSE_${PN}-qcom-sdm845-compute = "Firmware-qcom"
+LICENSE_${PN}-qcom-sdm845-modem = "Firmware-qcom"
+LICENSE_${PN}-qcom-sm8250-audio = "Firmware-qcom"
+LICENSE_${PN}-qcom-sm8250-adreno = "Firmware-qcom"
+LICENSE_${PN}-qcom-sm8250-compute = "Firmware-qcom"
+
FILES_${PN}-qcom-license = "${nonarch_base_libdir}/firmware/LICENSE.qcom ${nonarch_base_libdir}/firmware/qcom/NOTICE.txt"
+FILES_${PN}-qcom-yamato-license = "${nonarch_base_libdir}/firmware/LICENSE.qcom_yamato"
FILES_${PN}-qcom-venus-1.8 = "${nonarch_base_libdir}/firmware/qcom/venus-1.8/*"
FILES_${PN}-qcom-venus-4.2 = "${nonarch_base_libdir}/firmware/qcom/venus-4.2/*"
FILES_${PN}-qcom-venus-5.2 = "${nonarch_base_libdir}/firmware/qcom/venus-5.2/*"
FILES_${PN}-qcom-venus-5.4 = "${nonarch_base_libdir}/firmware/qcom/venus-5.4/*"
FILES_${PN}-qcom-vpu-1.0 = "${nonarch_base_libdir}/firmware/qcom/vpu-1.0/*"
FILES_${PN}-qcom-vpu-2.0 = "${nonarch_base_libdir}/firmware/qcom/vpu-2.0/*"
-FILES_${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a300_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw"
-FILES_${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.*"
-FILES_${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*"
-FILES_${PN}-qcom-adreno-a650 = "${nonarch_base_libdir}/firmware/qcom/a650*.* ${nonarch_base_libdir}/firmware/qcom/sm8250/a650*.*"
+FILES_${PN}-qcom-adreno-a2xx = "${nonarch_base_libdir}/firmware/qcom/leia_*.fw ${nonarch_base_libdir}/firmware/qcom/yamato_*.fw"
+FILES_${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a3*_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw"
+FILES_${PN}-qcom-adreno-a4xx = "${nonarch_base_libdir}/firmware/qcom/a4*_*.fw"
+FILES_${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.fw*"
+FILES_${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.*"
+FILES_${PN}-qcom-adreno-a650 = "${nonarch_base_libdir}/firmware/qcom/a650*.*"
FILES_${PN}-qcom-adreno-a660 = "${nonarch_base_libdir}/firmware/qcom/a660*.*"
+FILES_${PN}-qcom-apq8016-modem = "${nonarch_base_libdir}/firmware/qcom/apq8016/mba.mbn ${nonarch_base_libdir}/firmware/qcom/apq8016/modem.mbn"
+FILES_${PN}-qcom-apq8016-wifi = "${nonarch_base_libdir}/firmware/qcom/apq8016/wcnss.mbn ${nonarch_base_libdir}/firmware/qcom/apq8016/WCNSS*"
+FILES_${PN}-qcom-apq8096-adreno = "${nonarch_base_libdir}/firmware/qcom/apq8096/a530_zap.mbn ${nonarch_base_libdir}/firmware/qcom/a530_zap.mdt"
+FILES_${PN}-qcom-apq8096-audio = "${nonarch_base_libdir}/firmware/qcom/apq8096/adsp*.*"
+FILES_${PN}-qcom-apq8096-modem = "${nonarch_base_libdir}/firmware/qcom/apq8096/mba.mbn ${nonarch_base_libdir}/firmware/qcom/apq8096/modem*.* ${nonarch_base_libdir}/firmware/qcom/apq8096/wlanmdsp.mbn"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-compat = "${nonarch_base_libdir}/firmware/qcom/LENOVO/21BX"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*adsp*.* ${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/battmgr.jsn"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/qcdxkmsuc8280.mbn"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*cdsp*.*"
+FILES_${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${nonarch_base_libdir}/firmware/qcom/sc8280xp/LENOVO/21BX/*slpi*.*"
+FILES_${PN}-qcom-sdm845-adreno = "${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*"
FILES_${PN}-qcom-sdm845-audio = "${nonarch_base_libdir}/firmware/qcom/sdm845/adsp*.*"
FILES_${PN}-qcom-sdm845-compute = "${nonarch_base_libdir}/firmware/qcom/sdm845/cdsp*.*"
FILES_${PN}-qcom-sdm845-modem = "${nonarch_base_libdir}/firmware/qcom/sdm845/mba.mbn ${nonarch_base_libdir}/firmware/qcom/sdm845/modem*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/wlanmdsp.mbn"
+FILES_${PN}-qcom-sm8250-adreno = "${nonarch_base_libdir}/firmware/qcom/sm8250/a650*.*"
FILES_${PN}-qcom-sm8250-audio = "${nonarch_base_libdir}/firmware/qcom/sm8250/adsp*.*"
FILES_${PN}-qcom-sm8250-compute = "${nonarch_base_libdir}/firmware/qcom/sm8250/cdsp*.*"
RDEPENDS_${PN}-qcom-venus-1.8 = "${PN}-qcom-license"
@@ -974,17 +1043,32 @@ RDEPENDS_${PN}-qcom-venus-5.2 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-venus-5.4 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-vpu-1.0 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-vpu-2.0 = "${PN}-qcom-license"
-RDEPENDS_${PN}-qcom-adreno-a3xx = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-adreno-a2xx = "${PN}-qcom-license ${PN}-qcom-yamato-license"
+RDEPENDS_${PN}-qcom-adreno-a3xx = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-adreno-a4xx = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-adreno-a530 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-adreno-a630 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-adreno-a650 = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-adreno-a660 = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-apq8016-modem = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-apq8016-wifi = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-apq8096-audio = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-apq8096-modem = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${PN}-qcom-license"
+RDEPENDS_${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-sdm845-audio = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-sdm845-compute = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-sdm845-modem = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-sm8250-audio = "${PN}-qcom-license"
RDEPENDS_${PN}-qcom-sm8250-compute = "${PN}-qcom-license"
+RRECOMMENDS_${PN}-qcom-sc8280xp-lenovo-x13s-audio = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS_${PN}-qcom-sc8280xp-lenovo-x13s-adreno = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS_${PN}-qcom-sc8280xp-lenovo-x13s-compute = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+RRECOMMENDS_${PN}-qcom-sc8280xp-lenovo-x13s-sensors = "${PN}-qcom-sc8280xp-lenovo-x13s-compat"
+
FILES_${PN}-liquidio = "${nonarch_base_libdir}/firmware/liquidio"
# For Amlogic VDEC
@@ -1012,7 +1096,6 @@ LICENSE_${PN} = "\
& Firmware-fw_sst_0f28 \
& Firmware-go7007 \
& Firmware-hfi1_firmware \
- & Firmware-i2400m \
& Firmware-ibt_firmware \
& Firmware-it913x \
& Firmware-IntcSST2 \
@@ -1033,7 +1116,6 @@ LICENSE_${PN} = "\
& Firmware-ralink-firmware \
& Firmware-imx-sdma_firmware \
& Firmware-siano \
- & Firmware-tda7706-firmware \
& Firmware-ti-connectivity \
& Firmware-ti-keystone \
& Firmware-ueagle-atm4-firmware \
@@ -1066,3 +1148,6 @@ python populate_packages_prepend () {
# Firmware files are generally not run on the CPU, so they can be
# allarch despite being architecture specific
INSANE_SKIP = "arch"
+
+# Don't warn about already stripped files
+INSANE_SKIP:${PN} = "already-stripped"
diff --git a/meta/recipes-kernel/linux/cve-exclusion.inc b/meta/recipes-kernel/linux/cve-exclusion.inc
new file mode 100644
index 0000000000..efc8b09475
--- /dev/null
+++ b/meta/recipes-kernel/linux/cve-exclusion.inc
@@ -0,0 +1,13 @@
+# Kernel CVE exclusion file
+
+# https://nvd.nist.gov/vuln/detail/CVE-2020-29373
+# Patched in kernel since v5.6 ff002b30181d30cdfbca316dadd099c3ca0d739c
+# Backported in version v5.4.24 cac68d12c531aa3010509a5a55a5dfd18dedaa80
+CVE_CHECK_WHITELIST += "CVE-2020-29373"
+
+# https://nvd.nist.gov/vuln/detail/CVE-2022-39188
+# Patched in kernel since v5.19 b67fbebd4cf980aecbcc750e1462128bffe8ae15
+# Backported in version v5.4.212 c9c5501e815132530d741ec9fdd22657f91656bc
+# Backported in version v5.10.141 895428ee124ad70b9763259308354877b725c31d
+# Backported in version v5.15.65 3ffb97fce282df03723995f5eed6a559d008078e
+CVE_CHECK_WHITELIST += "CVE-2022-39188"
diff --git a/meta/recipes-kernel/linux/cve-exclusion_5.4.inc b/meta/recipes-kernel/linux/cve-exclusion_5.4.inc
new file mode 100644
index 0000000000..b0b33bcc1d
--- /dev/null
+++ b/meta/recipes-kernel/linux/cve-exclusion_5.4.inc
@@ -0,0 +1,9445 @@
+
+# Auto-generated CVE metadata, DO NOT EDIT BY HAND.
+# Generated at 2024-04-14 04:45:05.585211 for version 5.4.273
+
+python check_kernel_cve_status_version() {
+ this_version = "5.4.273"
+ kernel_version = d.getVar("LINUX_VERSION")
+ if kernel_version != this_version:
+ bb.warn("Kernel CVE status needs updating: generated for %s but kernel is %s" % (this_version, kernel_version))
+}
+do_cve_check[prefuncs] += "check_kernel_cve_status_version"
+
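For orientation before the generated list that follows: each CVE_CHECK_WHITELIST entry tells cve-check.bbclass to skip reporting that CVE, and the prefunc above only warns when the generated metadata was produced for a kernel version other than the one being built. Manually maintained exclusions can be layered on top of the generated data; a minimal sketch, using a deliberately fictitious CVE identifier:

    # Hypothetical manual exclusion in a bbappend or distro include
    # (the CVE id below is made up purely for illustration)
    # Issue analysed and found not to apply to this configuration
    CVE_CHECK_WHITELIST += "CVE-2099-0001"
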
+# fixed-version: Fixed after version 2.6.12rc2
+CVE_CHECK_WHITELIST += "CVE-2003-1604"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2004-0230"
+
+# CVE-2005-3660 has no known resolution
+
+# fixed-version: Fixed after version 2.6.26rc5
+CVE_CHECK_WHITELIST += "CVE-2006-3635"
+
+# fixed-version: Fixed after version 2.6.19rc3
+CVE_CHECK_WHITELIST += "CVE-2006-5331"
+
+# fixed-version: Fixed after version 2.6.19rc2
+CVE_CHECK_WHITELIST += "CVE-2006-6128"
+
+# CVE-2007-3719 has no known resolution
+
+# fixed-version: Fixed after version 2.6.12rc2
+CVE_CHECK_WHITELIST += "CVE-2007-4774"
+
+# fixed-version: Fixed after version 2.6.24rc6
+CVE_CHECK_WHITELIST += "CVE-2007-6761"
+
+# fixed-version: Fixed after version 2.6.20rc5
+CVE_CHECK_WHITELIST += "CVE-2007-6762"
+
+# CVE-2008-2544 has no known resolution
+
+# CVE-2008-4609 has no known resolution
+
+# fixed-version: Fixed after version 2.6.25rc1
+CVE_CHECK_WHITELIST += "CVE-2008-7316"
+
+# fixed-version: Fixed after version 2.6.31rc6
+CVE_CHECK_WHITELIST += "CVE-2009-2692"
+
+# fixed-version: Fixed after version 2.6.23rc9
+CVE_CHECK_WHITELIST += "CVE-2010-0008"
+
+# fixed-version: Fixed after version 2.6.36rc5
+CVE_CHECK_WHITELIST += "CVE-2010-3432"
+
+# CVE-2010-4563 has no known resolution
+
+# fixed-version: Fixed after version 2.6.37rc6
+CVE_CHECK_WHITELIST += "CVE-2010-4648"
+
+# fixed-version: Fixed after version 2.6.38rc1
+CVE_CHECK_WHITELIST += "CVE-2010-5313"
+
+# CVE-2010-5321 has no known resolution
+
+# fixed-version: Fixed after version 2.6.35rc1
+CVE_CHECK_WHITELIST += "CVE-2010-5328"
+
+# fixed-version: Fixed after version 2.6.39rc1
+CVE_CHECK_WHITELIST += "CVE-2010-5329"
+
+# fixed-version: Fixed after version 2.6.34rc7
+CVE_CHECK_WHITELIST += "CVE-2010-5331"
+
+# fixed-version: Fixed after version 2.6.37rc1
+CVE_CHECK_WHITELIST += "CVE-2010-5332"
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_WHITELIST += "CVE-2011-4098"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_WHITELIST += "CVE-2011-4131"
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_WHITELIST += "CVE-2011-4915"
+
+# CVE-2011-4916 has no known resolution
+
+# CVE-2011-4917 has no known resolution
+
+# fixed-version: Fixed after version 3.2rc1
+CVE_CHECK_WHITELIST += "CVE-2011-5321"
+
+# fixed-version: Fixed after version 3.1rc1
+CVE_CHECK_WHITELIST += "CVE-2011-5327"
+
+# fixed-version: Fixed after version 3.7rc2
+CVE_CHECK_WHITELIST += "CVE-2012-0957"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2119"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2136"
+
+# fixed-version: Fixed after version 3.5rc2
+CVE_CHECK_WHITELIST += "CVE-2012-2137"
+
+# fixed-version: Fixed after version 3.4rc6
+CVE_CHECK_WHITELIST += "CVE-2012-2313"
+
+# fixed-version: Fixed after version 3.4rc6
+CVE_CHECK_WHITELIST += "CVE-2012-2319"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2012-2372"
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2375"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2390"
+
+# fixed-version: Fixed after version 3.5rc4
+CVE_CHECK_WHITELIST += "CVE-2012-2669"
+
+# fixed-version: Fixed after version 2.6.34rc1
+CVE_CHECK_WHITELIST += "CVE-2012-2744"
+
+# fixed-version: Fixed after version 3.4rc3
+CVE_CHECK_WHITELIST += "CVE-2012-2745"
+
+# fixed-version: Fixed after version 3.5rc6
+CVE_CHECK_WHITELIST += "CVE-2012-3364"
+
+# fixed-version: Fixed after version 3.4rc5
+CVE_CHECK_WHITELIST += "CVE-2012-3375"
+
+# fixed-version: Fixed after version 3.5rc5
+CVE_CHECK_WHITELIST += "CVE-2012-3400"
+
+# fixed-version: Fixed after version 3.6rc2
+CVE_CHECK_WHITELIST += "CVE-2012-3412"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-3430"
+
+# fixed-version: Fixed after version 2.6.19rc4
+CVE_CHECK_WHITELIST += "CVE-2012-3510"
+
+# fixed-version: Fixed after version 3.5rc6
+CVE_CHECK_WHITELIST += "CVE-2012-3511"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-3520"
+
+# fixed-version: Fixed after version 3.0rc1
+CVE_CHECK_WHITELIST += "CVE-2012-3552"
+
+# Skipping CVE-2012-4220, no affected_versions
+
+# Skipping CVE-2012-4221, no affected_versions
+
+# Skipping CVE-2012-4222, no affected_versions
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_WHITELIST += "CVE-2012-4398"
+
+# fixed-version: Fixed after version 2.6.36rc4
+CVE_CHECK_WHITELIST += "CVE-2012-4444"
+
+# fixed-version: Fixed after version 3.7rc6
+CVE_CHECK_WHITELIST += "CVE-2012-4461"
+
+# fixed-version: Fixed after version 3.6rc5
+CVE_CHECK_WHITELIST += "CVE-2012-4467"
+
+# fixed-version: Fixed after version 3.7rc3
+CVE_CHECK_WHITELIST += "CVE-2012-4508"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_WHITELIST += "CVE-2012-4530"
+
+# CVE-2012-4542 has no known resolution
+
+# fixed-version: Fixed after version 3.7rc4
+CVE_CHECK_WHITELIST += "CVE-2012-4565"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_WHITELIST += "CVE-2012-5374"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_WHITELIST += "CVE-2012-5375"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-5517"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_WHITELIST += "CVE-2012-6536"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_WHITELIST += "CVE-2012-6537"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_WHITELIST += "CVE-2012-6538"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6539"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6540"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6541"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6542"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6543"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6544"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6545"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2012-6546"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6547"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6548"
+
+# fixed-version: Fixed after version 3.6rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6549"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6638"
+
+# fixed-version: Fixed after version 3.6rc2
+CVE_CHECK_WHITELIST += "CVE-2012-6647"
+
+# fixed-version: Fixed after version 3.6
+CVE_CHECK_WHITELIST += "CVE-2012-6657"
+
+# fixed-version: Fixed after version 3.6rc5
+CVE_CHECK_WHITELIST += "CVE-2012-6689"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6701"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6703"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6704"
+
+# fixed-version: Fixed after version 3.4rc1
+CVE_CHECK_WHITELIST += "CVE-2012-6712"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-0160"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_WHITELIST += "CVE-2013-0190"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_WHITELIST += "CVE-2013-0216"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_WHITELIST += "CVE-2013-0217"
+
+# fixed-version: Fixed after version 3.8
+CVE_CHECK_WHITELIST += "CVE-2013-0228"
+
+# fixed-version: Fixed after version 3.8rc7
+CVE_CHECK_WHITELIST += "CVE-2013-0231"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_WHITELIST += "CVE-2013-0268"
+
+# fixed-version: Fixed after version 3.8
+CVE_CHECK_WHITELIST += "CVE-2013-0290"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2013-0309"
+
+# fixed-version: Fixed after version 3.5
+CVE_CHECK_WHITELIST += "CVE-2013-0310"
+
+# fixed-version: Fixed after version 3.7rc8
+CVE_CHECK_WHITELIST += "CVE-2013-0311"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_WHITELIST += "CVE-2013-0313"
+
+# fixed-version: Fixed after version 3.11rc7
+CVE_CHECK_WHITELIST += "CVE-2013-0343"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_WHITELIST += "CVE-2013-0349"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_WHITELIST += "CVE-2013-0871"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-0913"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-0914"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1059"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1763"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1767"
+
+# fixed-version: Fixed after version 3.5rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1772"
+
+# fixed-version: Fixed after version 3.3rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1773"
+
+# fixed-version: Fixed after version 3.8rc5
+CVE_CHECK_WHITELIST += "CVE-2013-1774"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1792"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-1796"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-1797"
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-1798"
+
+# fixed-version: Fixed after version 3.8rc6
+CVE_CHECK_WHITELIST += "CVE-2013-1819"
+
+# fixed-version: Fixed after version 3.6rc7
+CVE_CHECK_WHITELIST += "CVE-2013-1826"
+
+# fixed-version: Fixed after version 3.6rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1827"
+
+# fixed-version: Fixed after version 3.9rc2
+CVE_CHECK_WHITELIST += "CVE-2013-1828"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1848"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1858"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1860"
+
+# fixed-version: Fixed after version 3.7rc3
+CVE_CHECK_WHITELIST += "CVE-2013-1928"
+
+# fixed-version: Fixed after version 3.9rc6
+CVE_CHECK_WHITELIST += "CVE-2013-1929"
+
+# Skipping CVE-2013-1935, no affected_versions
+
+# fixed-version: Fixed after version 3.0rc1
+CVE_CHECK_WHITELIST += "CVE-2013-1943"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_WHITELIST += "CVE-2013-1956"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_WHITELIST += "CVE-2013-1957"
+
+# fixed-version: Fixed after version 3.9rc5
+CVE_CHECK_WHITELIST += "CVE-2013-1958"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-1959"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-1979"
+
+# fixed-version: Fixed after version 3.8rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2015"
+
+# fixed-version: Fixed after version 2.6.34
+CVE_CHECK_WHITELIST += "CVE-2013-2017"
+
+# fixed-version: Fixed after version 3.8rc4
+CVE_CHECK_WHITELIST += "CVE-2013-2058"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-2094"
+
+# fixed-version: Fixed after version 2.6.34rc4
+CVE_CHECK_WHITELIST += "CVE-2013-2128"
+
+# fixed-version: Fixed after version 3.11rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2140"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-2141"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-2146"
+
+# fixed-version: Fixed after version 3.12rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2147"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2148"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2164"
+
+# Skipping CVE-2013-2188, no affected_versions
+
+# fixed-version: Fixed after version 3.9rc4
+CVE_CHECK_WHITELIST += "CVE-2013-2206"
+
+# Skipping CVE-2013-2224, no affected_versions
+
+# fixed-version: Fixed after version 3.10
+CVE_CHECK_WHITELIST += "CVE-2013-2232"
+
+# fixed-version: Fixed after version 3.10
+CVE_CHECK_WHITELIST += "CVE-2013-2234"
+
+# fixed-version: Fixed after version 3.9rc6
+CVE_CHECK_WHITELIST += "CVE-2013-2237"
+
+# Skipping CVE-2013-2239, no affected_versions
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2546"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2547"
+
+# fixed-version: Fixed after version 3.9rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2548"
+
+# fixed-version: Fixed after version 3.9rc8
+CVE_CHECK_WHITELIST += "CVE-2013-2596"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2634"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2635"
+
+# fixed-version: Fixed after version 3.9rc3
+CVE_CHECK_WHITELIST += "CVE-2013-2636"
+
+# fixed-version: Fixed after version 3.10rc4
+CVE_CHECK_WHITELIST += "CVE-2013-2850"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2851"
+
+# fixed-version: Fixed after version 3.10rc6
+CVE_CHECK_WHITELIST += "CVE-2013-2852"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2888"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2889"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2890"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2891"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2892"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2893"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2894"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2895"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2896"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-2897"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2898"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2899"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2929"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-2930"
+
+# fixed-version: Fixed after version 3.9
+CVE_CHECK_WHITELIST += "CVE-2013-3076"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3222"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3223"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3224"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3225"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3226"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3227"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3228"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3229"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3230"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3231"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3232"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3233"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3234"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3235"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3236"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3237"
+
+# fixed-version: Fixed after version 3.9rc7
+CVE_CHECK_WHITELIST += "CVE-2013-3301"
+
+# fixed-version: Fixed after version 3.8rc3
+CVE_CHECK_WHITELIST += "CVE-2013-3302"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4125"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4127"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4129"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4162"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4163"
+
+# fixed-version: Fixed after version 3.11rc5
+CVE_CHECK_WHITELIST += "CVE-2013-4205"
+
+# fixed-version: Fixed after version 3.10rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4220"
+
+# fixed-version: Fixed after version 3.10rc5
+CVE_CHECK_WHITELIST += "CVE-2013-4247"
+
+# fixed-version: Fixed after version 3.11rc6
+CVE_CHECK_WHITELIST += "CVE-2013-4254"
+
+# fixed-version: Fixed after version 3.12rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4270"
+
+# fixed-version: Fixed after version 3.12rc6
+CVE_CHECK_WHITELIST += "CVE-2013-4299"
+
+# fixed-version: Fixed after version 3.11
+CVE_CHECK_WHITELIST += "CVE-2013-4300"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4312"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-4343"
+
+# fixed-version: Fixed after version 3.13rc2
+CVE_CHECK_WHITELIST += "CVE-2013-4345"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4348"
+
+# fixed-version: Fixed after version 3.12rc2
+CVE_CHECK_WHITELIST += "CVE-2013-4350"
+
+# fixed-version: Fixed after version 3.12rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4387"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2013-4470"
+
+# fixed-version: Fixed after version 3.10rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4483"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4511"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4512"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4513"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4514"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4515"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-4516"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4563"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_WHITELIST += "CVE-2013-4579"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4587"
+
+# fixed-version: Fixed after version 2.6.33rc4
+CVE_CHECK_WHITELIST += "CVE-2013-4588"
+
+# fixed-version: Fixed after version 3.8rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4591"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2013-4592"
+
+# Skipping CVE-2013-4737, no affected_versions
+
+# Skipping CVE-2013-4738, no affected_versions
+
+# Skipping CVE-2013-4739, no affected_versions
+
+# fixed-version: Fixed after version 3.10rc5
+CVE_CHECK_WHITELIST += "CVE-2013-5634"
+
+# fixed-version: Fixed after version 3.6rc6
+CVE_CHECK_WHITELIST += "CVE-2013-6282"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-6367"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-6368"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-6376"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6378"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6380"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6381"
+
+# fixed-version: Fixed after version 3.13rc4
+CVE_CHECK_WHITELIST += "CVE-2013-6382"
+
+# fixed-version: Fixed after version 3.12
+CVE_CHECK_WHITELIST += "CVE-2013-6383"
+
+# Skipping CVE-2013-6392, no affected_versions
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6431"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6432"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2013-6885"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7026"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2013-7027"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7263"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7264"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7265"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7266"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7267"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7268"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7269"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7270"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7271"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7281"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_WHITELIST += "CVE-2013-7339"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7348"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2013-7421"
+
+# CVE-2013-7445 has no known resolution
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_WHITELIST += "CVE-2013-7446"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2013-7470"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2014-0038"
+
+# fixed-version: Fixed after version 3.14rc5
+CVE_CHECK_WHITELIST += "CVE-2014-0049"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_WHITELIST += "CVE-2014-0055"
+
+# fixed-version: Fixed after version 3.14rc4
+CVE_CHECK_WHITELIST += "CVE-2014-0069"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_WHITELIST += "CVE-2014-0077"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_WHITELIST += "CVE-2014-0100"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-0101"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-0102"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_WHITELIST += "CVE-2014-0131"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_WHITELIST += "CVE-2014-0155"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2014-0181"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2014-0196"
+
+# fixed-version: Fixed after version 2.6.33rc5
+CVE_CHECK_WHITELIST += "CVE-2014-0203"
+
+# fixed-version: Fixed after version 2.6.37rc1
+CVE_CHECK_WHITELIST += "CVE-2014-0205"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_WHITELIST += "CVE-2014-0206"
+
+# Skipping CVE-2014-0972, no affected_versions
+
+# fixed-version: Fixed after version 3.13
+CVE_CHECK_WHITELIST += "CVE-2014-1438"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2014-1444"
+
+# fixed-version: Fixed after version 3.12rc7
+CVE_CHECK_WHITELIST += "CVE-2014-1445"
+
+# fixed-version: Fixed after version 3.13rc7
+CVE_CHECK_WHITELIST += "CVE-2014-1446"
+
+# fixed-version: Fixed after version 3.13rc8
+CVE_CHECK_WHITELIST += "CVE-2014-1690"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2014-1737"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2014-1738"
+
+# fixed-version: Fixed after version 3.15rc6
+CVE_CHECK_WHITELIST += "CVE-2014-1739"
+
+# fixed-version: Fixed after version 3.14rc2
+CVE_CHECK_WHITELIST += "CVE-2014-1874"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2038"
+
+# fixed-version: Fixed after version 3.14rc3
+CVE_CHECK_WHITELIST += "CVE-2014-2039"
+
+# fixed-version: Fixed after version 3.14rc7
+CVE_CHECK_WHITELIST += "CVE-2014-2309"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2523"
+
+# fixed-version: Fixed after version 3.14
+CVE_CHECK_WHITELIST += "CVE-2014-2568"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2580"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-2672"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-2673"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2678"
+
+# fixed-version: Fixed after version 3.14rc6
+CVE_CHECK_WHITELIST += "CVE-2014-2706"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-2739"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_WHITELIST += "CVE-2014-2851"
+
+# fixed-version: Fixed after version 3.2rc7
+CVE_CHECK_WHITELIST += "CVE-2014-2889"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3122"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3144"
+
+# fixed-version: Fixed after version 3.15rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3145"
+
+# fixed-version: Fixed after version 3.15
+CVE_CHECK_WHITELIST += "CVE-2014-3153"
+
+# fixed-version: Fixed after version 3.17rc4
+CVE_CHECK_WHITELIST += "CVE-2014-3180"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_WHITELIST += "CVE-2014-3181"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3182"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3183"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3184"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_WHITELIST += "CVE-2014-3185"
+
+# fixed-version: Fixed after version 3.17rc3
+CVE_CHECK_WHITELIST += "CVE-2014-3186"
+
+# Skipping CVE-2014-3519, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc7
+CVE_CHECK_WHITELIST += "CVE-2014-3534"
+
+# fixed-version: Fixed after version 2.6.36rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3535"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3601"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3610"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3611"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-3631"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3645"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3646"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-3647"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3673"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3687"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3688"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3690"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-3917"
+
+# fixed-version: Fixed after version 3.15
+CVE_CHECK_WHITELIST += "CVE-2014-3940"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4014"
+
+# fixed-version: Fixed after version 3.14rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4027"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4157"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_WHITELIST += "CVE-2014-4171"
+
+# Skipping CVE-2014-4322, no affected_versions
+
+# Skipping CVE-2014-4323, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_WHITELIST += "CVE-2014-4508"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4608"
+
+# fixed-version: Fixed after version 3.16rc3
+CVE_CHECK_WHITELIST += "CVE-2014-4611"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4652"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4653"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4654"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4655"
+
+# fixed-version: Fixed after version 3.16rc2
+CVE_CHECK_WHITELIST += "CVE-2014-4656"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-4667"
+
+# fixed-version: Fixed after version 3.16rc4
+CVE_CHECK_WHITELIST += "CVE-2014-4699"
+
+# fixed-version: Fixed after version 3.16rc6
+CVE_CHECK_WHITELIST += "CVE-2014-4943"
+
+# fixed-version: Fixed after version 3.16rc7
+CVE_CHECK_WHITELIST += "CVE-2014-5045"
+
+# fixed-version: Fixed after version 3.16
+CVE_CHECK_WHITELIST += "CVE-2014-5077"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_WHITELIST += "CVE-2014-5206"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_WHITELIST += "CVE-2014-5207"
+
+# Skipping CVE-2014-5332, no affected_versions
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-5471"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-5472"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-6410"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-6416"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-6417"
+
+# fixed-version: Fixed after version 3.17rc5
+CVE_CHECK_WHITELIST += "CVE-2014-6418"
+
+# fixed-version: Fixed after version 3.17rc2
+CVE_CHECK_WHITELIST += "CVE-2014-7145"
+
+# Skipping CVE-2014-7207, no affected_versions
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7283"
+
+# fixed-version: Fixed after version 3.15rc7
+CVE_CHECK_WHITELIST += "CVE-2014-7284"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7822"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_WHITELIST += "CVE-2014-7825"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_WHITELIST += "CVE-2014-7826"
+
+# fixed-version: Fixed after version 3.18rc5
+CVE_CHECK_WHITELIST += "CVE-2014-7841"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7842"
+
+# fixed-version: Fixed after version 3.18rc5
+CVE_CHECK_WHITELIST += "CVE-2014-7843"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7970"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-7975"
+
+# fixed-version: Fixed after version 3.18rc3
+CVE_CHECK_WHITELIST += "CVE-2014-8086"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8133"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8134"
+
+# fixed-version: Fixed after version 4.0rc7
+CVE_CHECK_WHITELIST += "CVE-2014-8159"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8160"
+
+# fixed-version: Fixed after version 3.12rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8171"
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8172"
+
+# fixed-version: Fixed after version 3.13rc5
+CVE_CHECK_WHITELIST += "CVE-2014-8173"
+
+# Skipping CVE-2014-8181, no affected_versions
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-8369"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-8480"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-8481"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8559"
+
+# fixed-version: Fixed after version 3.14rc3
+CVE_CHECK_WHITELIST += "CVE-2014-8709"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8884"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-8989"
+
+# fixed-version: Fixed after version 3.18rc6
+CVE_CHECK_WHITELIST += "CVE-2014-9090"
+
+# fixed-version: Fixed after version 3.18rc6
+CVE_CHECK_WHITELIST += "CVE-2014-9322"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9419"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9420"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9428"
+
+# fixed-version: Fixed after version 3.19rc4
+CVE_CHECK_WHITELIST += "CVE-2014-9529"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9584"
+
+# fixed-version: Fixed after version 3.19rc4
+CVE_CHECK_WHITELIST += "CVE-2014-9585"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9644"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9683"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9710"
+
+# fixed-version: Fixed after version 3.15rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9715"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9717"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9728"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9729"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9730"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2014-9731"
+
+# Skipping CVE-2014-9777, no affected_versions
+
+# Skipping CVE-2014-9778, no affected_versions
+
+# Skipping CVE-2014-9779, no affected_versions
+
+# Skipping CVE-2014-9780, no affected_versions
+
+# Skipping CVE-2014-9781, no affected_versions
+
+# Skipping CVE-2014-9782, no affected_versions
+
+# Skipping CVE-2014-9783, no affected_versions
+
+# Skipping CVE-2014-9784, no affected_versions
+
+# Skipping CVE-2014-9785, no affected_versions
+
+# Skipping CVE-2014-9786, no affected_versions
+
+# Skipping CVE-2014-9787, no affected_versions
+
+# Skipping CVE-2014-9788, no affected_versions
+
+# Skipping CVE-2014-9789, no affected_versions
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9803"
+
+# Skipping CVE-2014-9863, no affected_versions
+
+# Skipping CVE-2014-9864, no affected_versions
+
+# Skipping CVE-2014-9865, no affected_versions
+
+# Skipping CVE-2014-9866, no affected_versions
+
+# Skipping CVE-2014-9867, no affected_versions
+
+# Skipping CVE-2014-9868, no affected_versions
+
+# Skipping CVE-2014-9869, no affected_versions
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9870"
+
+# Skipping CVE-2014-9871, no affected_versions
+
+# Skipping CVE-2014-9872, no affected_versions
+
+# Skipping CVE-2014-9873, no affected_versions
+
+# Skipping CVE-2014-9874, no affected_versions
+
+# Skipping CVE-2014-9875, no affected_versions
+
+# Skipping CVE-2014-9876, no affected_versions
+
+# Skipping CVE-2014-9877, no affected_versions
+
+# Skipping CVE-2014-9878, no affected_versions
+
+# Skipping CVE-2014-9879, no affected_versions
+
+# Skipping CVE-2014-9880, no affected_versions
+
+# Skipping CVE-2014-9881, no affected_versions
+
+# Skipping CVE-2014-9882, no affected_versions
+
+# Skipping CVE-2014-9883, no affected_versions
+
+# Skipping CVE-2014-9884, no affected_versions
+
+# Skipping CVE-2014-9885, no affected_versions
+
+# Skipping CVE-2014-9886, no affected_versions
+
+# Skipping CVE-2014-9887, no affected_versions
+
+# fixed-version: Fixed after version 3.13rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9888"
+
+# Skipping CVE-2014-9889, no affected_versions
+
+# Skipping CVE-2014-9890, no affected_versions
+
+# Skipping CVE-2014-9891, no affected_versions
+
+# Skipping CVE-2014-9892, no affected_versions
+
+# Skipping CVE-2014-9893, no affected_versions
+
+# Skipping CVE-2014-9894, no affected_versions
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9895"
+
+# Skipping CVE-2014-9896, no affected_versions
+
+# Skipping CVE-2014-9897, no affected_versions
+
+# Skipping CVE-2014-9898, no affected_versions
+
+# Skipping CVE-2014-9899, no affected_versions
+
+# Skipping CVE-2014-9900, no affected_versions
+
+# fixed-version: Fixed after version 3.14rc4
+CVE_CHECK_WHITELIST += "CVE-2014-9903"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9904"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9914"
+
+# fixed-version: Fixed after version 3.18rc2
+CVE_CHECK_WHITELIST += "CVE-2014-9922"
+
+# fixed-version: Fixed after version 3.19rc1
+CVE_CHECK_WHITELIST += "CVE-2014-9940"
+
+# fixed-version: Fixed after version 3.19rc6
+CVE_CHECK_WHITELIST += "CVE-2015-0239"
+
+# fixed-version: Fixed after version 3.15rc5
+CVE_CHECK_WHITELIST += "CVE-2015-0274"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-0275"
+
+# Skipping CVE-2015-0777, no affected_versions
+
+# Skipping CVE-2015-1328, no affected_versions
+
+# fixed-version: Fixed after version 4.2rc5
+CVE_CHECK_WHITELIST += "CVE-2015-1333"
+
+# fixed-version: Fixed after version 4.4rc5
+CVE_CHECK_WHITELIST += "CVE-2015-1339"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2015-1350"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-1420"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_WHITELIST += "CVE-2015-1421"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_WHITELIST += "CVE-2015-1465"
+
+# fixed-version: Fixed after version 3.19rc5
+CVE_CHECK_WHITELIST += "CVE-2015-1573"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-1593"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2015-1805"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_WHITELIST += "CVE-2015-2041"
+
+# fixed-version: Fixed after version 3.19
+CVE_CHECK_WHITELIST += "CVE-2015-2042"
+
+# fixed-version: Fixed after version 4.0rc4
+CVE_CHECK_WHITELIST += "CVE-2015-2150"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-2666"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_WHITELIST += "CVE-2015-2672"
+
+# fixed-version: Fixed after version 4.0rc6
+CVE_CHECK_WHITELIST += "CVE-2015-2686"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_WHITELIST += "CVE-2015-2830"
+
+# CVE-2015-2877 has no known resolution
+
+# fixed-version: Fixed after version 4.0rc7
+CVE_CHECK_WHITELIST += "CVE-2015-2922"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-2925"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2015-3212"
+
+# fixed-version: Fixed after version 2.6.33rc8
+CVE_CHECK_WHITELIST += "CVE-2015-3214"
+
+# fixed-version: Fixed after version 4.2rc2
+CVE_CHECK_WHITELIST += "CVE-2015-3288"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_WHITELIST += "CVE-2015-3290"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_WHITELIST += "CVE-2015-3291"
+
+# fixed-version: Fixed after version 4.0rc5
+CVE_CHECK_WHITELIST += "CVE-2015-3331"
+
+# Skipping CVE-2015-3332, no affected_versions
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-3339"
+
+# fixed-version: Fixed after version 4.1rc2
+CVE_CHECK_WHITELIST += "CVE-2015-3636"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-4001"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-4002"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-4003"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4004"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4036"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4167"
+
+# fixed-version: Fixed after version 3.13rc5
+CVE_CHECK_WHITELIST += "CVE-2015-4170"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4176"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4177"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4178"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2015-4692"
+
+# fixed-version: Fixed after version 4.1rc6
+CVE_CHECK_WHITELIST += "CVE-2015-4700"
+
+# fixed-version: Fixed after version 4.2rc7
+CVE_CHECK_WHITELIST += "CVE-2015-5156"
+
+# fixed-version: Fixed after version 4.2rc3
+CVE_CHECK_WHITELIST += "CVE-2015-5157"
+
+# fixed-version: Fixed after version 4.3rc3
+CVE_CHECK_WHITELIST += "CVE-2015-5257"
+
+# fixed-version: Fixed after version 4.3rc3
+CVE_CHECK_WHITELIST += "CVE-2015-5283"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-5307"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-5327"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-5364"
+
+# fixed-version: Fixed after version 4.1rc7
+CVE_CHECK_WHITELIST += "CVE-2015-5366"
+
+# fixed-version: Fixed after version 4.2rc6
+CVE_CHECK_WHITELIST += "CVE-2015-5697"
+
+# fixed-version: Fixed after version 4.1rc3
+CVE_CHECK_WHITELIST += "CVE-2015-5706"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-5707"
+
+# fixed-version: Fixed after version 4.2rc5
+CVE_CHECK_WHITELIST += "CVE-2015-6252"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-6526"
+
+# CVE-2015-6619 has no known resolution
+
+# CVE-2015-6646 has no known resolution
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-6937"
+
+# Skipping CVE-2015-7312, no affected_versions
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2015-7509"
+
+# fixed-version: Fixed after version 4.4rc7
+CVE_CHECK_WHITELIST += "CVE-2015-7513"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-7515"
+
+# fixed-version: Fixed after version 4.4rc8
+CVE_CHECK_WHITELIST += "CVE-2015-7550"
+
+# Skipping CVE-2015-7553, no affected_versions
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_WHITELIST += "CVE-2015-7566"
+
+# fixed-version: Fixed after version 4.3rc4
+CVE_CHECK_WHITELIST += "CVE-2015-7613"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-7799"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2015-7833"
+
+# Skipping CVE-2015-7837, no affected_versions
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_WHITELIST += "CVE-2015-7872"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-7884"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-7885"
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_WHITELIST += "CVE-2015-7990"
+
+# Skipping CVE-2015-8019, no affected_versions
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8104"
+
+# fixed-version: Fixed after version 4.0rc3
+CVE_CHECK_WHITELIST += "CVE-2015-8215"
+
+# fixed-version: Fixed after version 2.6.34rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8324"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8374"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_WHITELIST += "CVE-2015-8539"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8543"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8550"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8551"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8552"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8553"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8569"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8575"
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_WHITELIST += "CVE-2015-8660"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8709"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8746"
+
+# fixed-version: Fixed after version 4.3rc4
+CVE_CHECK_WHITELIST += "CVE-2015-8767"
+
+# fixed-version: Fixed after version 4.4rc5
+CVE_CHECK_WHITELIST += "CVE-2015-8785"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8787"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8812"
+
+# fixed-version: Fixed after version 4.4rc6
+CVE_CHECK_WHITELIST += "CVE-2015-8816"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8830"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8839"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_WHITELIST += "CVE-2015-8844"
+
+# fixed-version: Fixed after version 4.4rc3
+CVE_CHECK_WHITELIST += "CVE-2015-8845"
+
+# Skipping CVE-2015-8937, no affected_versions
+
+# Skipping CVE-2015-8938, no affected_versions
+
+# Skipping CVE-2015-8939, no affected_versions
+
+# Skipping CVE-2015-8940, no affected_versions
+
+# Skipping CVE-2015-8941, no affected_versions
+
+# Skipping CVE-2015-8942, no affected_versions
+
+# Skipping CVE-2015-8943, no affected_versions
+
+# Skipping CVE-2015-8944, no affected_versions
+
+# fixed-version: Fixed after version 4.1rc2
+CVE_CHECK_WHITELIST += "CVE-2015-8950"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8952"
+
+# fixed-version: Fixed after version 4.3
+CVE_CHECK_WHITELIST += "CVE-2015-8953"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8955"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8956"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8961"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8962"
+
+# fixed-version: Fixed after version 4.4
+CVE_CHECK_WHITELIST += "CVE-2015-8963"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8964"
+
+# fixed-version: Fixed after version 4.4rc8
+CVE_CHECK_WHITELIST += "CVE-2015-8966"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8967"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2015-8970"
+
+# fixed-version: Fixed after version 3.19rc7
+CVE_CHECK_WHITELIST += "CVE-2015-9004"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2015-9016"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2015-9289"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-0617"
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_WHITELIST += "CVE-2016-0723"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-0728"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-0758"
+
+# Skipping CVE-2016-0774, no affected_versions
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2016-0821"
+
+# fixed-version: Fixed after version 4.0rc5
+CVE_CHECK_WHITELIST += "CVE-2016-0823"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_WHITELIST += "CVE-2016-10044"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10088"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_WHITELIST += "CVE-2016-10147"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-10150"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10153"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10154"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_WHITELIST += "CVE-2016-10200"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10208"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10229"
+
+# fixed-version: Fixed after version 4.8rc6
+CVE_CHECK_WHITELIST += "CVE-2016-10318"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10723"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10741"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10764"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10905"
+
+# fixed-version: Fixed after version 4.5rc6
+CVE_CHECK_WHITELIST += "CVE-2016-10906"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2016-10907"
+
+# fixed-version: Fixed after version 4.7rc5
+CVE_CHECK_WHITELIST += "CVE-2016-1237"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-1575"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-1576"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_WHITELIST += "CVE-2016-1583"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2053"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2069"
+
+# fixed-version: Fixed after version 4.4
+CVE_CHECK_WHITELIST += "CVE-2016-2070"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2016-2085"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_WHITELIST += "CVE-2016-2117"
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_WHITELIST += "CVE-2016-2143"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2184"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2185"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2186"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_WHITELIST += "CVE-2016-2187"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2016-2188"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2016-2383"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2016-2384"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2543"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2544"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2545"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2546"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2547"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2548"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2549"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2016-2550"
+
+# fixed-version: Fixed after version 4.5rc2
+CVE_CHECK_WHITELIST += "CVE-2016-2782"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2016-2847"
+
+# Skipping CVE-2016-2853, no affected_versions
+
+# Skipping CVE-2016-2854, no affected_versions
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_WHITELIST += "CVE-2016-3044"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3070"
+
+# fixed-version: Fixed after version 4.6rc2
+CVE_CHECK_WHITELIST += "CVE-2016-3134"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3135"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_WHITELIST += "CVE-2016-3136"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_WHITELIST += "CVE-2016-3137"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3138"
+
+# fixed-version: Fixed after version 3.17rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3139"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_WHITELIST += "CVE-2016-3140"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3156"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3157"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3672"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3689"
+
+# Skipping CVE-2016-3695, no affected_versions
+
+# Skipping CVE-2016-3699, no affected_versions
+
+# Skipping CVE-2016-3707, no affected_versions
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-3713"
+
+# CVE-2016-3775 has no known resolution
+
+# CVE-2016-3802 has no known resolution
+
+# CVE-2016-3803 has no known resolution
+
+# fixed-version: Fixed after version 4.4rc4
+CVE_CHECK_WHITELIST += "CVE-2016-3841"
+
+# fixed-version: Fixed after version 4.8rc2
+CVE_CHECK_WHITELIST += "CVE-2016-3857"
+
+# fixed-version: Fixed after version 4.5
+CVE_CHECK_WHITELIST += "CVE-2016-3951"
+
+# fixed-version: Fixed after version 4.6rc3
+CVE_CHECK_WHITELIST += "CVE-2016-3955"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_WHITELIST += "CVE-2016-3961"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4440"
+
+# fixed-version: Fixed after version 4.7rc4
+CVE_CHECK_WHITELIST += "CVE-2016-4470"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4482"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-4485"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-4486"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2016-4557"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_WHITELIST += "CVE-2016-4558"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2016-4565"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2016-4568"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4569"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4578"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-4580"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_WHITELIST += "CVE-2016-4581"
+
+# fixed-version: Fixed after version 4.7rc4
+CVE_CHECK_WHITELIST += "CVE-2016-4794"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4805"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-4913"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4951"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4997"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-4998"
+
+# fixed-version: Fixed after version 4.9rc2
+CVE_CHECK_WHITELIST += "CVE-2016-5195"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_WHITELIST += "CVE-2016-5243"
+
+# fixed-version: Fixed after version 4.7rc3
+CVE_CHECK_WHITELIST += "CVE-2016-5244"
+
+# Skipping CVE-2016-5340, no affected_versions
+
+# Skipping CVE-2016-5342, no affected_versions
+
+# Skipping CVE-2016-5343, no affected_versions
+
+# Skipping CVE-2016-5344, no affected_versions
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_WHITELIST += "CVE-2016-5400"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-5412"
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_WHITELIST += "CVE-2016-5696"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-5728"
+
+# fixed-version: Fixed after version 4.7rc6
+CVE_CHECK_WHITELIST += "CVE-2016-5828"
+
+# fixed-version: Fixed after version 4.7rc5
+CVE_CHECK_WHITELIST += "CVE-2016-5829"
+
+# CVE-2016-5870 has no known resolution
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2016-6130"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6136"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_WHITELIST += "CVE-2016-6156"
+
+# fixed-version: Fixed after version 4.7
+CVE_CHECK_WHITELIST += "CVE-2016-6162"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_WHITELIST += "CVE-2016-6187"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6197"
+
+# fixed-version: Fixed after version 4.6
+CVE_CHECK_WHITELIST += "CVE-2016-6198"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6213"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6327"
+
+# fixed-version: Fixed after version 4.8rc3
+CVE_CHECK_WHITELIST += "CVE-2016-6480"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6516"
+
+# Skipping CVE-2016-6753, no affected_versions
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6786"
+
+# fixed-version: Fixed after version 4.0rc1
+CVE_CHECK_WHITELIST += "CVE-2016-6787"
+
+# fixed-version: Fixed after version 4.8rc5
+CVE_CHECK_WHITELIST += "CVE-2016-6828"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-7039"
+
+# fixed-version: Fixed after version 4.9rc3
+CVE_CHECK_WHITELIST += "CVE-2016-7042"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7097"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7117"
+
+# Skipping CVE-2016-7118, no affected_versions
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7425"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7910"
+
+# fixed-version: Fixed after version 4.7rc7
+CVE_CHECK_WHITELIST += "CVE-2016-7911"
+
+# fixed-version: Fixed after version 4.6rc5
+CVE_CHECK_WHITELIST += "CVE-2016-7912"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7913"
+
+# fixed-version: Fixed after version 4.6rc4
+CVE_CHECK_WHITELIST += "CVE-2016-7914"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-7915"
+
+# fixed-version: Fixed after version 4.6rc7
+CVE_CHECK_WHITELIST += "CVE-2016-7916"
+
+# fixed-version: Fixed after version 4.5rc6
+CVE_CHECK_WHITELIST += "CVE-2016-7917"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_WHITELIST += "CVE-2016-8399"
+
+# Skipping CVE-2016-8401, no affected_versions
+
+# Skipping CVE-2016-8402, no affected_versions
+
+# Skipping CVE-2016-8403, no affected_versions
+
+# Skipping CVE-2016-8404, no affected_versions
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_WHITELIST += "CVE-2016-8405"
+
+# Skipping CVE-2016-8406, no affected_versions
+
+# Skipping CVE-2016-8407, no affected_versions
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-8630"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-8632"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-8633"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2016-8636"
+
+# fixed-version: Fixed after version 4.9rc6
+CVE_CHECK_WHITELIST += "CVE-2016-8645"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2016-8646"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_WHITELIST += "CVE-2016-8650"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-8655"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_WHITELIST += "CVE-2016-8658"
+
+# CVE-2016-8660 has no known resolution
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-8666"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-9083"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-9084"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9120"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_WHITELIST += "CVE-2016-9178"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2016-9191"
+
+# fixed-version: Fixed after version 4.9rc3
+CVE_CHECK_WHITELIST += "CVE-2016-9313"
+
+# fixed-version: Fixed after version 4.9rc4
+CVE_CHECK_WHITELIST += "CVE-2016-9555"
+
+# fixed-version: Fixed after version 4.9
+CVE_CHECK_WHITELIST += "CVE-2016-9576"
+
+# fixed-version: Fixed after version 4.10rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9588"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2016-9604"
+
+# Skipping CVE-2016-9644, no affected_versions
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9685"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9754"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-9755"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_WHITELIST += "CVE-2016-9756"
+
+# fixed-version: Fixed after version 4.9rc7
+CVE_CHECK_WHITELIST += "CVE-2016-9777"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-9793"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9794"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2016-9806"
+
+# fixed-version: Fixed after version 4.9rc8
+CVE_CHECK_WHITELIST += "CVE-2016-9919"
+
+# Skipping CVE-2017-0403, no affected_versions
+
+# Skipping CVE-2017-0404, no affected_versions
+
+# Skipping CVE-2017-0426, no affected_versions
+
+# Skipping CVE-2017-0427, no affected_versions
+
+# CVE-2017-0507 has no known resolution
+
+# CVE-2017-0508 has no known resolution
+
+# Skipping CVE-2017-0510, no affected_versions
+
+# Skipping CVE-2017-0528, no affected_versions
+
+# Skipping CVE-2017-0537, no affected_versions
+
+# CVE-2017-0564 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-0605"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-0627"
+
+# CVE-2017-0630 has no known resolution
+
+# CVE-2017-0749 has no known resolution
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2017-0750"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-0786"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2017-0861"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000111"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000112"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000251"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000252"
+
+# fixed-version: Fixed after version 4.1rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000253"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000255"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-1000363"
+
+# fixed-version: Fixed after version 4.12rc6
+CVE_CHECK_WHITELIST += "CVE-2017-1000364"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_WHITELIST += "CVE-2017-1000365"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000370"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-1000371"
+
+# fixed-version: Fixed after version 4.12rc6
+CVE_CHECK_WHITELIST += "CVE-2017-1000379"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-1000380"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2017-1000405"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2017-1000407"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2017-1000410"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-10661"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-10662"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-10663"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-10810"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_WHITELIST += "CVE-2017-10911"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-11089"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-11176"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-11472"
+
+# fixed-version: Fixed after version 4.13rc2
+CVE_CHECK_WHITELIST += "CVE-2017-11473"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_WHITELIST += "CVE-2017-11600"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_WHITELIST += "CVE-2017-12134"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-12146"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_WHITELIST += "CVE-2017-12153"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-12154"
+
+# fixed-version: Fixed after version 4.9rc6
+CVE_CHECK_WHITELIST += "CVE-2017-12168"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-12188"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-12190"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-12192"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_WHITELIST += "CVE-2017-12193"
+
+# fixed-version: Fixed after version 4.13rc4
+CVE_CHECK_WHITELIST += "CVE-2017-12762"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-13080"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-13166"
+
+# fixed-version: Fixed after version 4.5rc4
+CVE_CHECK_WHITELIST += "CVE-2017-13167"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2017-13168"
+
+# fixed-version: Fixed after version 4.5rc1
+CVE_CHECK_WHITELIST += "CVE-2017-13215"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2017-13216"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2017-13220"
+
+# CVE-2017-13221 has no known resolution
+
+# CVE-2017-13222 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-13305"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_WHITELIST += "CVE-2017-13686"
+
+# CVE-2017-13693 has no known resolution
+
+# CVE-2017-13694 has no known resolution
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2017-13695"
+
+# fixed-version: Fixed after version 4.3rc1
+CVE_CHECK_WHITELIST += "CVE-2017-13715"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-14051"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_WHITELIST += "CVE-2017-14106"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_WHITELIST += "CVE-2017-14140"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-14156"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-14340"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-14489"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_WHITELIST += "CVE-2017-14497"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-14954"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_WHITELIST += "CVE-2017-14991"
+
+# fixed-version: Fixed after version 4.9rc1
+CVE_CHECK_WHITELIST += "CVE-2017-15102"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-15115"
+
+# fixed-version: Fixed after version 4.2rc1
+CVE_CHECK_WHITELIST += "CVE-2017-15116"
+
+# fixed-version: Fixed after version 3.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-15121"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-15126"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2017-15127"
+
+# fixed-version: Fixed after version 4.14rc8
+CVE_CHECK_WHITELIST += "CVE-2017-15128"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-15129"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-15265"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-15274"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-15299"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_WHITELIST += "CVE-2017-15306"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-15537"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-15649"
+
+# fixed-version: Fixed after version 3.19rc3
+CVE_CHECK_WHITELIST += "CVE-2017-15868"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-15951"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16525"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16526"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16527"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16528"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16529"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16530"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16531"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16532"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16533"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16534"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-16535"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16536"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16537"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16538"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_WHITELIST += "CVE-2017-16643"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16644"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2017-16645"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16646"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_WHITELIST += "CVE-2017-16647"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16648"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_WHITELIST += "CVE-2017-16649"
+
+# fixed-version: Fixed after version 4.14
+CVE_CHECK_WHITELIST += "CVE-2017-16650"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16911"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16912"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16913"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-16914"
+
+# fixed-version: Fixed after version 4.14rc7
+CVE_CHECK_WHITELIST += "CVE-2017-16939"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-16994"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16995"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-16996"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_WHITELIST += "CVE-2017-17052"
+
+# fixed-version: Fixed after version 4.13rc7
+CVE_CHECK_WHITELIST += "CVE-2017-17053"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17448"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17449"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17450"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17558"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17712"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17741"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17805"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-17806"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2017-17807"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17852"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17853"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17854"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17855"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17856"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17857"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-17862"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17863"
+
+# fixed-version: Fixed after version 4.15rc5
+CVE_CHECK_WHITELIST += "CVE-2017-17864"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2017-17975"
+
+# fixed-version: Fixed after version 4.11rc7
+CVE_CHECK_WHITELIST += "CVE-2017-18017"
+
+# fixed-version: Fixed after version 4.15rc7
+CVE_CHECK_WHITELIST += "CVE-2017-18075"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18079"
+
+# CVE-2017-18169 has no known resolution
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18174"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18193"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-18200"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2017-18202"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18203"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18204"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2017-18208"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18216"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18218"
+
+# fixed-version: Fixed after version 4.12rc4
+CVE_CHECK_WHITELIST += "CVE-2017-18221"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18222"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18224"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18232"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18241"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18249"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18255"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18257"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_WHITELIST += "CVE-2017-18261"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-18270"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2017-18344"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-18360"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2017-18379"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18509"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18549"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18550"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2017-18551"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-18552"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_WHITELIST += "CVE-2017-18595"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-2583"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-2584"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-2596"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-2618"
+
+# fixed-version: Fixed after version 2.6.25rc1
+CVE_CHECK_WHITELIST += "CVE-2017-2634"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-2636"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2017-2647"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-2671"
+
+# fixed-version: Fixed after version 4.14rc5
+CVE_CHECK_WHITELIST += "CVE-2017-5123"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-5546"
+
+# fixed-version: Fixed after version 4.10rc5
+CVE_CHECK_WHITELIST += "CVE-2017-5547"
+
+# fixed-version: Fixed after version 4.10rc5
+CVE_CHECK_WHITELIST += "CVE-2017-5548"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-5549"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-5550"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-5551"
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_WHITELIST += "CVE-2017-5576"
+
+# fixed-version: Fixed after version 4.10rc6
+CVE_CHECK_WHITELIST += "CVE-2017-5577"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-5669"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5715"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5753"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-5754"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5897"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-5967"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5970"
+
+# fixed-version: Fixed after version 4.4rc1
+CVE_CHECK_WHITELIST += "CVE-2017-5972"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-5986"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-6001"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_WHITELIST += "CVE-2017-6074"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-6214"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_WHITELIST += "CVE-2017-6345"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_WHITELIST += "CVE-2017-6346"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-6347"
+
+# fixed-version: Fixed after version 4.10
+CVE_CHECK_WHITELIST += "CVE-2017-6348"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-6353"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-6874"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2017-6951"
+
+# fixed-version: Fixed after version 4.11rc5
+CVE_CHECK_WHITELIST += "CVE-2017-7184"
+
+# fixed-version: Fixed after version 4.11rc5
+CVE_CHECK_WHITELIST += "CVE-2017-7187"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-7261"
+
+# fixed-version: Fixed after version 4.10rc4
+CVE_CHECK_WHITELIST += "CVE-2017-7273"
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_WHITELIST += "CVE-2017-7277"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-7294"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-7308"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-7346"
+
+# CVE-2017-7369 has no known resolution
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_WHITELIST += "CVE-2017-7374"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2017-7472"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_WHITELIST += "CVE-2017-7477"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_WHITELIST += "CVE-2017-7482"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-7487"
+
+# fixed-version: Fixed after version 4.7rc1
+CVE_CHECK_WHITELIST += "CVE-2017-7495"
+
+# fixed-version: Fixed after version 4.12rc7
+CVE_CHECK_WHITELIST += "CVE-2017-7518"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-7533"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-7541"
+
+# fixed-version: Fixed after version 4.13rc2
+CVE_CHECK_WHITELIST += "CVE-2017-7542"
+
+# fixed-version: Fixed after version 4.13
+CVE_CHECK_WHITELIST += "CVE-2017-7558"
+
+# fixed-version: Fixed after version 4.11rc6
+CVE_CHECK_WHITELIST += "CVE-2017-7616"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2017-7618"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_WHITELIST += "CVE-2017-7645"
+
+# fixed-version: Fixed after version 4.11rc7
+CVE_CHECK_WHITELIST += "CVE-2017-7889"
+
+# fixed-version: Fixed after version 4.11
+CVE_CHECK_WHITELIST += "CVE-2017-7895"
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2017-7979"
+
+# fixed-version: Fixed after version 4.11rc4
+CVE_CHECK_WHITELIST += "CVE-2017-8061"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-8062"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8063"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8064"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8065"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8066"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8067"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-8068"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-8069"
+
+# fixed-version: Fixed after version 4.10rc8
+CVE_CHECK_WHITELIST += "CVE-2017-8070"
+
+# fixed-version: Fixed after version 4.10rc7
+CVE_CHECK_WHITELIST += "CVE-2017-8071"
+
+# fixed-version: Fixed after version 4.10rc7
+CVE_CHECK_WHITELIST += "CVE-2017-8072"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8106"
+
+# fixed-version: Fixed after version 3.19rc6
+CVE_CHECK_WHITELIST += "CVE-2017-8240"
+
+# CVE-2017-8242 has no known resolution
+
+# CVE-2017-8244 has no known resolution
+
+# CVE-2017-8245 has no known resolution
+
+# CVE-2017-8246 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8797"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2017-8824"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8831"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-8890"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-8924"
+
+# fixed-version: Fixed after version 4.11rc2
+CVE_CHECK_WHITELIST += "CVE-2017-8925"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9059"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-9074"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-9075"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-9076"
+
+# fixed-version: Fixed after version 4.12rc2
+CVE_CHECK_WHITELIST += "CVE-2017-9077"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9150"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_WHITELIST += "CVE-2017-9211"
+
+# fixed-version: Fixed after version 4.12rc3
+CVE_CHECK_WHITELIST += "CVE-2017-9242"
+
+# fixed-version: Fixed after version 4.12rc5
+CVE_CHECK_WHITELIST += "CVE-2017-9605"
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_WHITELIST += "CVE-2017-9725"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9984"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9985"
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2017-9986"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2018-1000004"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1000026"
+
+# fixed-version: Fixed after version 4.15
+CVE_CHECK_WHITELIST += "CVE-2018-1000028"
+
+# fixed-version: Fixed after version 4.16
+CVE_CHECK_WHITELIST += "CVE-2018-1000199"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_WHITELIST += "CVE-2018-1000200"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_WHITELIST += "CVE-2018-1000204"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-10021"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-10074"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10087"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10124"
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10322"
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10323"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2018-1065"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1066"
+
+# fixed-version: Fixed after version 4.13rc6
+CVE_CHECK_WHITELIST += "CVE-2018-10675"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2018-1068"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10840"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10853"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-1087"
+
+# CVE-2018-10872 has no known resolution
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10876"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10877"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10878"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10879"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10880"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10881"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10882"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-10883"
+
+# fixed-version: Fixed after version 2.6.36rc1
+CVE_CHECK_WHITELIST += "CVE-2018-10901"
+
+# fixed-version: Fixed after version 4.18rc6
+CVE_CHECK_WHITELIST += "CVE-2018-10902"
+
+# fixed-version: Fixed after version 4.14rc2
+CVE_CHECK_WHITELIST += "CVE-2018-1091"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1092"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1093"
+
+# fixed-version: Fixed after version 4.13rc5
+CVE_CHECK_WHITELIST += "CVE-2018-10938"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1094"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_WHITELIST += "CVE-2018-10940"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1095"
+
+# fixed-version: Fixed after version 4.17rc2
+CVE_CHECK_WHITELIST += "CVE-2018-1108"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1118"
+
+# fixed-version: Fixed after version 4.17rc6
+CVE_CHECK_WHITELIST += "CVE-2018-1120"
+
+# CVE-2018-1121 has no known resolution
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2018-11232"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1128"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-1129"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-1130"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-11412"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_WHITELIST += "CVE-2018-11506"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_WHITELIST += "CVE-2018-11508"
+
+# CVE-2018-11987 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12126"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12127"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12130"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2018-12207"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12232"
+
+# fixed-version: Fixed after version 4.18rc2
+CVE_CHECK_WHITELIST += "CVE-2018-12233"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12633"
+
+# fixed-version: Fixed after version 4.18rc2
+CVE_CHECK_WHITELIST += "CVE-2018-12714"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12896"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-12904"
+
+# CVE-2018-12928 has no known resolution
+
+# CVE-2018-12929 has no known resolution
+
+# CVE-2018-12930 has no known resolution
+
+# CVE-2018-12931 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13053"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13093"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13094"
+
+# fixed-version: Fixed after version 4.18rc3
+CVE_CHECK_WHITELIST += "CVE-2018-13095"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13096"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13097"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13098"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13099"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13100"
+
+# fixed-version: Fixed after version 4.18rc4
+CVE_CHECK_WHITELIST += "CVE-2018-13405"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-13406"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14609"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14610"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14611"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14612"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14613"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14614"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14615"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14616"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14617"
+
+# fixed-version: Fixed after version 4.15rc4
+CVE_CHECK_WHITELIST += "CVE-2018-14619"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_WHITELIST += "CVE-2018-14625"
+
+# fixed-version: Fixed after version 4.19rc6
+CVE_CHECK_WHITELIST += "CVE-2018-14633"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14634"
+
+# fixed-version: Fixed after version 4.19rc4
+CVE_CHECK_WHITELIST += "CVE-2018-14641"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2018-14646"
+
+# fixed-version: Fixed after version 4.19rc2
+CVE_CHECK_WHITELIST += "CVE-2018-14656"
+
+# fixed-version: Fixed after version 4.18rc8
+CVE_CHECK_WHITELIST += "CVE-2018-14678"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-14734"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2018-15471"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-15572"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-15594"
+
+# fixed-version: Fixed after version 4.18rc5
+CVE_CHECK_WHITELIST += "CVE-2018-16276"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2018-16597"
+
+# fixed-version: Fixed after version 4.19rc2
+CVE_CHECK_WHITELIST += "CVE-2018-16658"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2018-16862"
+
+# fixed-version: Fixed after version 4.20rc3
+CVE_CHECK_WHITELIST += "CVE-2018-16871"
+
+# fixed-version: Fixed after version 5.0rc5
+CVE_CHECK_WHITELIST += "CVE-2018-16880"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_WHITELIST += "CVE-2018-16882"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2018-16884"
+
+# CVE-2018-16885 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc4
+CVE_CHECK_WHITELIST += "CVE-2018-17182"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2018-17972"
+
+# CVE-2018-17977 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2018-18021"
+
+# fixed-version: Fixed after version 4.19
+CVE_CHECK_WHITELIST += "CVE-2018-18281"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_WHITELIST += "CVE-2018-18386"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2018-18397"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2018-18445"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-18559"
+
+# CVE-2018-18653 has no known resolution
+
+# fixed-version: Fixed after version 4.17rc4
+CVE_CHECK_WHITELIST += "CVE-2018-18690"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2018-18710"
+
+# fixed-version: Fixed after version 4.20rc2
+CVE_CHECK_WHITELIST += "CVE-2018-18955"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2018-19406"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2018-19407"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_WHITELIST += "CVE-2018-19824"
+
+# fixed-version: Fixed after version 4.20rc3
+CVE_CHECK_WHITELIST += "CVE-2018-19854"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_WHITELIST += "CVE-2018-19985"
+
+# fixed-version: Fixed after version 4.20rc6
+CVE_CHECK_WHITELIST += "CVE-2018-20169"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-20449"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20509"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2018-20510"
+
+# fixed-version: Fixed after version 4.19rc5
+CVE_CHECK_WHITELIST += "CVE-2018-20511"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20669"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20784"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20836"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20854"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20855"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20856"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20961"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-20976"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2018-21008"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2018-25015"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_WHITELIST += "CVE-2018-25020"
+
+# CVE-2018-3574 has no known resolution
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-3620"
+
+# fixed-version: Fixed after version 4.17rc7
+CVE_CHECK_WHITELIST += "CVE-2018-3639"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-3646"
+
+# fixed-version: Fixed after version 3.7rc1
+CVE_CHECK_WHITELIST += "CVE-2018-3665"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-3693"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2018-5332"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2018-5333"
+
+# fixed-version: Fixed after version 4.15rc8
+CVE_CHECK_WHITELIST += "CVE-2018-5344"
+
+# fixed-version: Fixed after version 4.18rc7
+CVE_CHECK_WHITELIST += "CVE-2018-5390"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-5391"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2018-5703"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-5750"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-5803"
+
+# fixed-version: Fixed after version 4.17rc6
+CVE_CHECK_WHITELIST += "CVE-2018-5814"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-5848"
+
+# Skipping CVE-2018-5856, no affected_versions
+
+# fixed-version: Fixed after version 4.11rc8
+CVE_CHECK_WHITELIST += "CVE-2018-5873"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-5953"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-5995"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2018-6412"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-6554"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2018-6555"
+
+# CVE-2018-6559 has no known resolution
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2018-6927"
+
+# fixed-version: Fixed after version 4.14rc6
+CVE_CHECK_WHITELIST += "CVE-2018-7191"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-7273"
+
+# fixed-version: Fixed after version 4.11rc1
+CVE_CHECK_WHITELIST += "CVE-2018-7480"
+
+# fixed-version: Fixed after version 4.15rc3
+CVE_CHECK_WHITELIST += "CVE-2018-7492"
+
+# fixed-version: Fixed after version 4.16rc2
+CVE_CHECK_WHITELIST += "CVE-2018-7566"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-7740"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2018-7754"
+
+# fixed-version: Fixed after version 4.19rc5
+CVE_CHECK_WHITELIST += "CVE-2018-7755"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-7757"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2018-7995"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-8043"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2018-8087"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-8781"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-8822"
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2018-8897"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2018-9363"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_WHITELIST += "CVE-2018-9385"
+
+# fixed-version: Fixed after version 4.17rc3
+CVE_CHECK_WHITELIST += "CVE-2018-9415"
+
+# fixed-version: Fixed after version 4.6rc1
+CVE_CHECK_WHITELIST += "CVE-2018-9422"
+
+# fixed-version: Fixed after version 4.15rc6
+CVE_CHECK_WHITELIST += "CVE-2018-9465"
+
+# fixed-version: Fixed after version 4.18rc5
+CVE_CHECK_WHITELIST += "CVE-2018-9516"
+
+# fixed-version: Fixed after version 4.14rc1
+CVE_CHECK_WHITELIST += "CVE-2018-9517"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2018-9518"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2018-9568"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-0136"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0145"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0146"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0147"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0148"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-0149"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_WHITELIST += "CVE-2019-0154"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_WHITELIST += "CVE-2019-0155"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-10124"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-10125"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-10126"
+
+# CVE-2019-10140 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-10142"
+
+# fixed-version: Fixed after version 5.3rc3
+CVE_CHECK_WHITELIST += "CVE-2019-10207"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-10220"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-10638"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-10639"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_WHITELIST += "CVE-2019-11085"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11091"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_WHITELIST += "CVE-2019-11135"
+
+# fixed-version: Fixed after version 4.8rc5
+CVE_CHECK_WHITELIST += "CVE-2019-11190"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11191"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-1125"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-11477"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-11478"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-11479"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-11486"
+
+# fixed-version: Fixed after version 5.1rc5
+CVE_CHECK_WHITELIST += "CVE-2019-11487"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-11599"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_WHITELIST += "CVE-2019-11683"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11810"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11811"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-11815"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11833"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-11884"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-12378"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12379"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-12380"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-12381"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12382"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12454"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12455"
+
+# CVE-2019-12456 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12614"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_WHITELIST += "CVE-2019-12615"
+
+# fixed-version: Fixed after version 5.2rc7
+CVE_CHECK_WHITELIST += "CVE-2019-12817"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_WHITELIST += "CVE-2019-12818"
+
+# fixed-version: Fixed after version 5.0rc8
+CVE_CHECK_WHITELIST += "CVE-2019-12819"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2019-12881"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-12984"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_WHITELIST += "CVE-2019-13233"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_WHITELIST += "CVE-2019-13272"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-13631"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_WHITELIST += "CVE-2019-13648"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-14283"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-14284"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-14615"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2019-14763"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-14814"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-14815"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-14816"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-14821"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-14835"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-14895"
+
+# cpe-stable-backport: Backported in 5.4.16
+CVE_CHECK_WHITELIST += "CVE-2019-14896"
+
+# cpe-stable-backport: Backported in 5.4.16
+CVE_CHECK_WHITELIST += "CVE-2019-14897"
+
+# CVE-2019-14898 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.11
+CVE_CHECK_WHITELIST += "CVE-2019-14901"
+
+# fixed-version: Fixed after version 5.3rc8
+CVE_CHECK_WHITELIST += "CVE-2019-15030"
+
+# fixed-version: Fixed after version 5.3rc8
+CVE_CHECK_WHITELIST += "CVE-2019-15031"
+
+# fixed-version: Fixed after version 5.2rc2
+CVE_CHECK_WHITELIST += "CVE-2019-15090"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15098"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15099"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_WHITELIST += "CVE-2019-15117"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_WHITELIST += "CVE-2019-15118"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15211"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15212"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15213"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15214"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15215"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_WHITELIST += "CVE-2019-15216"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15217"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15218"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15219"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15220"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_WHITELIST += "CVE-2019-15221"
+
+# fixed-version: Fixed after version 5.3rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15222"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15223"
+
+# CVE-2019-15239 has no known resolution
+
+# CVE-2019-15290 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.1
+CVE_CHECK_WHITELIST += "CVE-2019-15291"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15292"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-15504"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15505"
+
+# fixed-version: Fixed after version 5.3rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15538"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_WHITELIST += "CVE-2019-15666"
+
+# CVE-2019-15791 has no known resolution
+
+# CVE-2019-15792 has no known resolution
+
+# CVE-2019-15793 has no known resolution
+
+# CVE-2019-15794 needs backporting (fixed from 5.12)
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15807"
+
+# CVE-2019-15902 has no known resolution
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15916"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15917"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15918"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15919"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-15920"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-15921"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-15922"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-15923"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-15924"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15925"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-15926"
+
+# fixed-version: Fixed after version 5.0rc2
+CVE_CHECK_WHITELIST += "CVE-2019-15927"
+
+# CVE-2019-16089 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-16229"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-16230"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-16231"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-16232"
+
+# fixed-version: Fixed after version 5.4rc5
+CVE_CHECK_WHITELIST += "CVE-2019-16233"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-16234"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-16413"
+
+# fixed-version: Fixed after version 5.3rc7
+CVE_CHECK_WHITELIST += "CVE-2019-16714"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-16746"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2019-16921"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_WHITELIST += "CVE-2019-16994"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-16995"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17052"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17053"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17054"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17055"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17056"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-17075"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-17133"
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-17351"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-17666"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-18198"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-18282"
+
+# cpe-stable-backport: Backported in 5.4.1
+CVE_CHECK_WHITELIST += "CVE-2019-18660"
+
+# fixed-version: Fixed after version 4.17rc5
+CVE_CHECK_WHITELIST += "CVE-2019-18675"
+
+# CVE-2019-18680 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.1
+CVE_CHECK_WHITELIST += "CVE-2019-18683"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-18786"
+
+# fixed-version: Fixed after version 5.1rc7
+CVE_CHECK_WHITELIST += "CVE-2019-18805"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-18806"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-18807"
+
+# cpe-stable-backport: Backported in 5.4.56
+CVE_CHECK_WHITELIST += "CVE-2019-18808"
+
+# cpe-stable-backport: Backported in 5.4.9
+CVE_CHECK_WHITELIST += "CVE-2019-18809"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-18810"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-18811"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-18812"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-18813"
+
+# cpe-stable-backport: Backported in 5.4.43
+CVE_CHECK_WHITELIST += "CVE-2019-18814"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-18885"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19036"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19037"
+
+# cpe-stable-backport: Backported in 5.4.33
+CVE_CHECK_WHITELIST += "CVE-2019-19039"
+
+# cpe-stable-backport: Backported in 5.4.14
+CVE_CHECK_WHITELIST += "CVE-2019-19043"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19044"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19045"
+
+# cpe-stable-backport: Backported in 5.4.15
+CVE_CHECK_WHITELIST += "CVE-2019-19046"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19047"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19048"
+
+# fixed-version: Fixed after version 5.4rc5
+CVE_CHECK_WHITELIST += "CVE-2019-19049"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19050"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19051"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-19052"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19053"
+
+# cpe-stable-backport: Backported in 5.4.56
+CVE_CHECK_WHITELIST += "CVE-2019-19054"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19055"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19056"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19057"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19058"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19059"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19060"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19061"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19062"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19063"
+
+# cpe-stable-backport: Backported in 5.4.13
+CVE_CHECK_WHITELIST += "CVE-2019-19064"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19065"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19066"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-19067"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19068"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19069"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19070"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19071"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19072"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19073"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19074"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-19075"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19076"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19077"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-19078"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-19079"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19080"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19081"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19082"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-19083"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19227"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2019-19241"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19252"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19318"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19319"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19332"
+
+# cpe-stable-backport: Backported in 5.4.3
+CVE_CHECK_WHITELIST += "CVE-2019-19338"
+
+# cpe-stable-backport: Backported in 5.4.33
+CVE_CHECK_WHITELIST += "CVE-2019-19377"
+
+# CVE-2019-19378 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.4
+CVE_CHECK_WHITELIST += "CVE-2019-19447"
+
+# cpe-stable-backport: Backported in 5.4.60
+CVE_CHECK_WHITELIST += "CVE-2019-19448"
+
+# CVE-2019-19449 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.45
+CVE_CHECK_WHITELIST += "CVE-2019-19462"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19523"
+
+# fixed-version: Fixed after version 5.4rc8
+CVE_CHECK_WHITELIST += "CVE-2019-19524"
+
+# fixed-version: Fixed after version 5.4rc2
+CVE_CHECK_WHITELIST += "CVE-2019-19525"
+
+# fixed-version: Fixed after version 5.4rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19526"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19527"
+
+# fixed-version: Fixed after version 5.4rc3
+CVE_CHECK_WHITELIST += "CVE-2019-19528"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-19529"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_WHITELIST += "CVE-2019-19530"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19531"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19532"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19533"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-19534"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19535"
+
+# fixed-version: Fixed after version 5.3rc4
+CVE_CHECK_WHITELIST += "CVE-2019-19536"
+
+# fixed-version: Fixed after version 5.3rc5
+CVE_CHECK_WHITELIST += "CVE-2019-19537"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19543"
+
+# cpe-stable-backport: Backported in 5.4.2
+CVE_CHECK_WHITELIST += "CVE-2019-19602"
+
+# cpe-stable-backport: Backported in 5.4.2
+CVE_CHECK_WHITELIST += "CVE-2019-19767"
+
+# cpe-stable-backport: Backported in 5.4.24
+CVE_CHECK_WHITELIST += "CVE-2019-19768"
+
+# cpe-stable-backport: Backported in 5.4.28
+CVE_CHECK_WHITELIST += "CVE-2019-19769"
+
+# cpe-stable-backport: Backported in 5.4.59
+CVE_CHECK_WHITELIST += "CVE-2019-19770"
+
+# fixed-version: Fixed after version 5.4rc7
+CVE_CHECK_WHITELIST += "CVE-2019-19807"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19813"
+
+# CVE-2019-19814 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19815"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19816"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19922"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-19927"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-19947"
+
+# cpe-stable-backport: Backported in 5.4.9
+CVE_CHECK_WHITELIST += "CVE-2019-19965"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-19966"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-1999"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-20054"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-20095"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-20096"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2024"
+
+# fixed-version: Fixed after version 4.20rc5
+CVE_CHECK_WHITELIST += "CVE-2019-2025"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-20422"
+
+# fixed-version: Fixed after version 4.8rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2054"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2019-20636"
+
+# CVE-2019-20794 has no known resolution
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-20806"
+
+# cpe-stable-backport: Backported in 5.4.48
+CVE_CHECK_WHITELIST += "CVE-2019-20810"
+
+# fixed-version: Fixed after version 5.1rc3
+CVE_CHECK_WHITELIST += "CVE-2019-20811"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2019-20812"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2019-20908"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_WHITELIST += "CVE-2019-20934"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2101"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2181"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2019-2182"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-2213"
+
+# fixed-version: Fixed after version 5.3rc2
+CVE_CHECK_WHITELIST += "CVE-2019-2214"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2019-2215"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_WHITELIST += "CVE-2019-25044"
+
+# fixed-version: Fixed after version 5.1
+CVE_CHECK_WHITELIST += "CVE-2019-25045"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_WHITELIST += "CVE-2019-25160"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2019-25162"
+
+# cpe-stable-backport: Backported in 5.4.19
+CVE_CHECK_WHITELIST += "CVE-2019-3016"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3459"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3460"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_WHITELIST += "CVE-2019-3701"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-3819"
+
+# fixed-version: Fixed after version 3.18rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3837"
+
+# fixed-version: Fixed after version 5.2rc6
+CVE_CHECK_WHITELIST += "CVE-2019-3846"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3874"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-3882"
+
+# fixed-version: Fixed after version 5.1rc4
+CVE_CHECK_WHITELIST += "CVE-2019-3887"
+
+# fixed-version: Fixed after version 5.1rc6
+CVE_CHECK_WHITELIST += "CVE-2019-3892"
+
+# fixed-version: Fixed after version 2.6.35rc1
+CVE_CHECK_WHITELIST += "CVE-2019-3896"
+
+# fixed-version: Fixed after version 5.2rc4
+CVE_CHECK_WHITELIST += "CVE-2019-3900"
+
+# fixed-version: Fixed after version 4.6rc6
+CVE_CHECK_WHITELIST += "CVE-2019-3901"
+
+# fixed-version: Fixed after version 5.3
+CVE_CHECK_WHITELIST += "CVE-2019-5108"
+
+# Skipping CVE-2019-5489, no affected_versions
+
+# fixed-version: Fixed after version 5.0rc2
+CVE_CHECK_WHITELIST += "CVE-2019-6133"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-6974"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-7221"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-7222"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_WHITELIST += "CVE-2019-7308"
+
+# fixed-version: Fixed after version 5.0rc8
+CVE_CHECK_WHITELIST += "CVE-2019-8912"
+
+# fixed-version: Fixed after version 5.0rc6
+CVE_CHECK_WHITELIST += "CVE-2019-8956"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-8980"
+
+# fixed-version: Fixed after version 5.0rc4
+CVE_CHECK_WHITELIST += "CVE-2019-9003"
+
+# fixed-version: Fixed after version 5.0rc7
+CVE_CHECK_WHITELIST += "CVE-2019-9162"
+
+# fixed-version: Fixed after version 5.0
+CVE_CHECK_WHITELIST += "CVE-2019-9213"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9245"
+
+# fixed-version: Fixed after version 4.15rc2
+CVE_CHECK_WHITELIST += "CVE-2019-9444"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9445"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9453"
+
+# fixed-version: Fixed after version 4.15rc9
+CVE_CHECK_WHITELIST += "CVE-2019-9454"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9455"
+
+# fixed-version: Fixed after version 4.16rc6
+CVE_CHECK_WHITELIST += "CVE-2019-9456"
+
+# fixed-version: Fixed after version 4.13rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9457"
+
+# fixed-version: Fixed after version 4.19rc7
+CVE_CHECK_WHITELIST += "CVE-2019-9458"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9466"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9500"
+
+# fixed-version: Fixed after version 5.1rc1
+CVE_CHECK_WHITELIST += "CVE-2019-9503"
+
+# fixed-version: Fixed after version 5.2
+CVE_CHECK_WHITELIST += "CVE-2019-9506"
+
+# fixed-version: Fixed after version 5.1rc2
+CVE_CHECK_WHITELIST += "CVE-2019-9857"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-0009"
+
+# fixed-version: Fixed after version 4.16rc3
+CVE_CHECK_WHITELIST += "CVE-2020-0030"
+
+# cpe-stable-backport: Backported in 5.4.4
+CVE_CHECK_WHITELIST += "CVE-2020-0041"
+
+# fixed-version: Fixed after version 4.3rc7
+CVE_CHECK_WHITELIST += "CVE-2020-0066"
+
+# cpe-stable-backport: Backported in 5.4.36
+CVE_CHECK_WHITELIST += "CVE-2020-0067"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-0110"
+
+# cpe-stable-backport: Backported in 5.4.39
+CVE_CHECK_WHITELIST += "CVE-2020-0255"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2020-0305"
+
+# CVE-2020-0347 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.19
+CVE_CHECK_WHITELIST += "CVE-2020-0404"
+
+# cpe-stable-backport: Backported in 5.4.73
+CVE_CHECK_WHITELIST += "CVE-2020-0423"
+
+# cpe-stable-backport: Backported in 5.4.7
+CVE_CHECK_WHITELIST += "CVE-2020-0427"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2020-0429"
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2020-0430"
+
+# cpe-stable-backport: Backported in 5.4.12
+CVE_CHECK_WHITELIST += "CVE-2020-0431"
+
+# cpe-stable-backport: Backported in 5.4.17
+CVE_CHECK_WHITELIST += "CVE-2020-0432"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2020-0433"
+
+# fixed-version: Fixed after version 4.19rc1
+CVE_CHECK_WHITELIST += "CVE-2020-0435"
+
+# cpe-stable-backport: Backported in 5.4.24
+CVE_CHECK_WHITELIST += "CVE-2020-0444"
+
+# cpe-stable-backport: Backported in 5.4.63
+CVE_CHECK_WHITELIST += "CVE-2020-0465"
+
+# cpe-stable-backport: Backported in 5.4.61
+CVE_CHECK_WHITELIST += "CVE-2020-0466"
+
+# cpe-stable-backport: Backported in 5.4.46
+CVE_CHECK_WHITELIST += "CVE-2020-0543"
+
+# cpe-stable-backport: Backported in 5.4.72
+CVE_CHECK_WHITELIST += "CVE-2020-10135"
+
+# cpe-stable-backport: Backported in 5.4.8
+CVE_CHECK_WHITELIST += "CVE-2020-10690"
+
+# CVE-2020-10708 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.42
+CVE_CHECK_WHITELIST += "CVE-2020-10711"
+
+# fixed-version: Fixed after version 5.2rc3
+CVE_CHECK_WHITELIST += "CVE-2020-10720"
+
+# cpe-stable-backport: Backported in 5.4.44
+CVE_CHECK_WHITELIST += "CVE-2020-10732"
+
+# fixed-version: Fixed after version 3.16rc1
+CVE_CHECK_WHITELIST += "CVE-2020-10742"
+
+# cpe-stable-backport: Backported in 5.4.39
+CVE_CHECK_WHITELIST += "CVE-2020-10751"
+
+# cpe-stable-backport: Backported in 5.4.45
+CVE_CHECK_WHITELIST += "CVE-2020-10757"
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2020-10766"
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2020-10767"
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2020-10768"
+
+# fixed-version: Fixed after version 5.0rc3
+CVE_CHECK_WHITELIST += "CVE-2020-10769"
+
+# fixed-version: Fixed after version 5.4rc6
+CVE_CHECK_WHITELIST += "CVE-2020-10773"
+
+# CVE-2020-10774 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.53
+CVE_CHECK_WHITELIST += "CVE-2020-10781"
+
+# cpe-stable-backport: Backported in 5.4.24
+CVE_CHECK_WHITELIST += "CVE-2020-10942"
+
+# cpe-stable-backport: Backported in 5.4.32
+CVE_CHECK_WHITELIST += "CVE-2020-11494"
+
+# cpe-stable-backport: Backported in 5.4.31
+CVE_CHECK_WHITELIST += "CVE-2020-11565"
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2020-11608"
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2020-11609"
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2020-11668"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2020-11669"
+
+# CVE-2020-11725 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.36
+CVE_CHECK_WHITELIST += "CVE-2020-11884"
+
+# CVE-2020-11935 has no known resolution
+
+# fixed-version: Fixed after version 5.3rc1
+CVE_CHECK_WHITELIST += "CVE-2020-12114"
+
+# cpe-stable-backport: Backported in 5.4.72
+CVE_CHECK_WHITELIST += "CVE-2020-12351"
+
+# cpe-stable-backport: Backported in 5.4.72
+CVE_CHECK_WHITELIST += "CVE-2020-12352"
+
+# CVE-2020-12362 needs backporting (fixed from 5.11rc1)
+
+# CVE-2020-12363 needs backporting (fixed from 5.11rc1)
+
+# CVE-2020-12364 needs backporting (fixed from 5.11rc1)
+
+# cpe-stable-backport: Backported in 5.4.36
+CVE_CHECK_WHITELIST += "CVE-2020-12464"
+
+# cpe-stable-backport: Backported in 5.4.26
+CVE_CHECK_WHITELIST += "CVE-2020-12465"
+
+# cpe-stable-backport: Backported in 5.4.14
+CVE_CHECK_WHITELIST += "CVE-2020-12652"
+
+# cpe-stable-backport: Backported in 5.4.20
+CVE_CHECK_WHITELIST += "CVE-2020-12653"
+
+# cpe-stable-backport: Backported in 5.4.20
+CVE_CHECK_WHITELIST += "CVE-2020-12654"
+
+# cpe-stable-backport: Backported in 5.4.50
+CVE_CHECK_WHITELIST += "CVE-2020-12655"
+
+# cpe-stable-backport: Backported in 5.4.56
+CVE_CHECK_WHITELIST += "CVE-2020-12656"
+
+# cpe-stable-backport: Backported in 5.4.33
+CVE_CHECK_WHITELIST += "CVE-2020-12657"
+
+# cpe-stable-backport: Backported in 5.4.35
+CVE_CHECK_WHITELIST += "CVE-2020-12659"
+
+# cpe-stable-backport: Backported in 5.4.43
+CVE_CHECK_WHITELIST += "CVE-2020-12768"
+
+# cpe-stable-backport: Backported in 5.4.17
+CVE_CHECK_WHITELIST += "CVE-2020-12769"
+
+# cpe-stable-backport: Backported in 5.4.42
+CVE_CHECK_WHITELIST += "CVE-2020-12770"
+
+# cpe-stable-backport: Backported in 5.4.49
+CVE_CHECK_WHITELIST += "CVE-2020-12771"
+
+# cpe-stable-backport: Backported in 5.4.33
+CVE_CHECK_WHITELIST += "CVE-2020-12826"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-12888"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-12912"
+
+# cpe-stable-backport: Backported in 5.4.42
+CVE_CHECK_WHITELIST += "CVE-2020-13143"
+
+# cpe-stable-backport: Backported in 5.4.46
+CVE_CHECK_WHITELIST += "CVE-2020-13974"
+
+# CVE-2020-14304 has no known resolution
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2020-14305"
+
+# cpe-stable-backport: Backported in 5.4.61
+CVE_CHECK_WHITELIST += "CVE-2020-14314"
+
+# cpe-stable-backport: Backported in 5.4.58
+CVE_CHECK_WHITELIST += "CVE-2020-14331"
+
+# cpe-stable-backport: Backported in 5.4.78
+CVE_CHECK_WHITELIST += "CVE-2020-14351"
+
+# fixed-version: Fixed after version 4.14rc3
+CVE_CHECK_WHITELIST += "CVE-2020-14353"
+
+# cpe-stable-backport: Backported in 5.4.53
+CVE_CHECK_WHITELIST += "CVE-2020-14356"
+
+# cpe-stable-backport: Backported in 5.4.28
+CVE_CHECK_WHITELIST += "CVE-2020-14381"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-14385"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-14386"
+
+# cpe-stable-backport: Backported in 5.4.66
+CVE_CHECK_WHITELIST += "CVE-2020-14390"
+
+# cpe-stable-backport: Backported in 5.4.16
+CVE_CHECK_WHITELIST += "CVE-2020-14416"
+
+# cpe-stable-backport: Backported in 5.4.51
+CVE_CHECK_WHITELIST += "CVE-2020-15393"
+
+# cpe-stable-backport: Backported in 5.4.49
+CVE_CHECK_WHITELIST += "CVE-2020-15436"
+
+# cpe-stable-backport: Backported in 5.4.54
+CVE_CHECK_WHITELIST += "CVE-2020-15437"
+
+# cpe-stable-backport: Backported in 5.4.50
+CVE_CHECK_WHITELIST += "CVE-2020-15780"
+
+# CVE-2020-15802 has no known resolution
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-15852"
+
+# cpe-stable-backport: Backported in 5.4.148
+CVE_CHECK_WHITELIST += "CVE-2020-16119"
+
+# CVE-2020-16120 needs backporting (fixed from 5.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.57
+CVE_CHECK_WHITELIST += "CVE-2020-16166"
+
+# cpe-stable-backport: Backported in 5.4.5
+CVE_CHECK_WHITELIST += "CVE-2020-1749"
+
+# cpe-stable-backport: Backported in 5.4.51
+CVE_CHECK_WHITELIST += "CVE-2020-24394"
+
+# cpe-stable-backport: Backported in 5.4.56
+CVE_CHECK_WHITELIST += "CVE-2020-24490"
+
+# CVE-2020-24502 has no known resolution
+
+# CVE-2020-24503 has no known resolution
+
+# CVE-2020-24504 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-24586"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-24587"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-24588"
+
+# cpe-stable-backport: Backported in 5.4.70
+CVE_CHECK_WHITELIST += "CVE-2020-25211"
+
+# cpe-stable-backport: Backported in 5.4.60
+CVE_CHECK_WHITELIST += "CVE-2020-25212"
+
+# CVE-2020-25220 has no known resolution
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-25221"
+
+# cpe-stable-backport: Backported in 5.4.66
+CVE_CHECK_WHITELIST += "CVE-2020-25284"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-25285"
+
+# cpe-stable-backport: Backported in 5.4.102
+CVE_CHECK_WHITELIST += "CVE-2020-25639"
+
+# cpe-stable-backport: Backported in 5.4.64
+CVE_CHECK_WHITELIST += "CVE-2020-25641"
+
+# cpe-stable-backport: Backported in 5.4.68
+CVE_CHECK_WHITELIST += "CVE-2020-25643"
+
+# cpe-stable-backport: Backported in 5.4.68
+CVE_CHECK_WHITELIST += "CVE-2020-25645"
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-25656"
+
+# CVE-2020-25661 has no known resolution
+
+# CVE-2020-25662 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-25668"
+
+# cpe-stable-backport: Backported in 5.4.79
+CVE_CHECK_WHITELIST += "CVE-2020-25669"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2020-25670"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2020-25671"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2020-25672"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2020-25673"
+
+# cpe-stable-backport: Backported in 5.4.76
+CVE_CHECK_WHITELIST += "CVE-2020-25704"
+
+# cpe-stable-backport: Backported in 5.4.73
+CVE_CHECK_WHITELIST += "CVE-2020-25705"
+
+# cpe-stable-backport: Backported in 5.4.59
+CVE_CHECK_WHITELIST += "CVE-2020-26088"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-26139"
+
+# CVE-2020-26140 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-26141"
+
+# CVE-2020-26142 has no known resolution
+
+# CVE-2020-26143 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-26145"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2020-26147"
+
+# cpe-stable-backport: Backported in 5.4.129
+CVE_CHECK_WHITELIST += "CVE-2020-26541"
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2020-26555"
+
+# CVE-2020-26556 has no known resolution
+
+# CVE-2020-26557 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2020-26558"
+
+# CVE-2020-26559 has no known resolution
+
+# CVE-2020-26560 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2020-27066"
+
+# fixed-version: Fixed after version 4.14rc4
+CVE_CHECK_WHITELIST += "CVE-2020-27067"
+
+# cpe-stable-backport: Backported in 5.4.24
+CVE_CHECK_WHITELIST += "CVE-2020-27068"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-27152"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-27170"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-27171"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-27194"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-2732"
+
+# cpe-stable-backport: Backported in 5.4.25
+CVE_CHECK_WHITELIST += "CVE-2020-27418"
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-27673"
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-27675"
+
+# cpe-stable-backport: Backported in 5.4.75
+CVE_CHECK_WHITELIST += "CVE-2020-27777"
+
+# cpe-stable-backport: Backported in 5.4.73
+CVE_CHECK_WHITELIST += "CVE-2020-27784"
+
+# cpe-stable-backport: Backported in 5.4.42
+CVE_CHECK_WHITELIST += "CVE-2020-27786"
+
+# cpe-stable-backport: Backported in 5.4.86
+CVE_CHECK_WHITELIST += "CVE-2020-27815"
+
+# cpe-stable-backport: Backported in 5.4.162
+CVE_CHECK_WHITELIST += "CVE-2020-27820"
+
+# cpe-stable-backport: Backported in 5.4.94
+CVE_CHECK_WHITELIST += "CVE-2020-27825"
+
+# cpe-stable-backport: Backported in 5.4.83
+CVE_CHECK_WHITELIST += "CVE-2020-27830"
+
+# CVE-2020-27835 needs backporting (fixed from 5.10rc6)
+
+# cpe-stable-backport: Backported in 5.4.66
+CVE_CHECK_WHITELIST += "CVE-2020-28097"
+
+# cpe-stable-backport: Backported in 5.4.89
+CVE_CHECK_WHITELIST += "CVE-2020-28374"
+
+# cpe-stable-backport: Backported in 5.4.83
+CVE_CHECK_WHITELIST += "CVE-2020-28588"
+
+# cpe-stable-backport: Backported in 5.4.71
+CVE_CHECK_WHITELIST += "CVE-2020-28915"
+
+# cpe-stable-backport: Backported in 5.4.80
+CVE_CHECK_WHITELIST += "CVE-2020-28941"
+
+# cpe-stable-backport: Backported in 5.4.76
+CVE_CHECK_WHITELIST += "CVE-2020-28974"
+
+# cpe-stable-backport: Backported in 5.4.48
+CVE_CHECK_WHITELIST += "CVE-2020-29368"
+
+# cpe-stable-backport: Backported in 5.4.54
+CVE_CHECK_WHITELIST += "CVE-2020-29369"
+
+# cpe-stable-backport: Backported in 5.4.27
+CVE_CHECK_WHITELIST += "CVE-2020-29370"
+
+# cpe-stable-backport: Backported in 5.4.61
+CVE_CHECK_WHITELIST += "CVE-2020-29371"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-29372"
+
+# CVE-2020-29373 needs backporting (fixed from 5.6rc2)
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2020-29374"
+
+# CVE-2020-29534 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.86
+CVE_CHECK_WHITELIST += "CVE-2020-29568"
+
+# cpe-stable-backport: Backported in 5.4.86
+CVE_CHECK_WHITELIST += "CVE-2020-29569"
+
+# cpe-stable-backport: Backported in 5.4.83
+CVE_CHECK_WHITELIST += "CVE-2020-29660"
+
+# cpe-stable-backport: Backported in 5.4.83
+CVE_CHECK_WHITELIST += "CVE-2020-29661"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-35499"
+
+# CVE-2020-35501 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.76
+CVE_CHECK_WHITELIST += "CVE-2020-35508"
+
+# fixed-version: Fixed after version 4.17rc1
+CVE_CHECK_WHITELIST += "CVE-2020-35513"
+
+# cpe-stable-backport: Backported in 5.4.82
+CVE_CHECK_WHITELIST += "CVE-2020-35519"
+
+# cpe-stable-backport: Backported in 5.4.88
+CVE_CHECK_WHITELIST += "CVE-2020-36158"
+
+# CVE-2020-36310 needs backporting (fixed from 5.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.131
+CVE_CHECK_WHITELIST += "CVE-2020-36311"
+
+# cpe-stable-backport: Backported in 5.4.66
+CVE_CHECK_WHITELIST += "CVE-2020-36312"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36313"
+
+# cpe-stable-backport: Backported in 5.4.88
+CVE_CHECK_WHITELIST += "CVE-2020-36322"
+
+# CVE-2020-36385 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.58
+CVE_CHECK_WHITELIST += "CVE-2020-36386"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36387"
+
+# cpe-stable-backport: Backported in 5.4.176
+CVE_CHECK_WHITELIST += "CVE-2020-36516"
+
+# cpe-stable-backport: Backported in 5.4.30
+CVE_CHECK_WHITELIST += "CVE-2020-36557"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-36558"
+
+# CVE-2020-36691 needs backporting (fixed from 5.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.86
+CVE_CHECK_WHITELIST += "CVE-2020-36694"
+
+# cpe-stable-backport: Backported in 5.4.62
+CVE_CHECK_WHITELIST += "CVE-2020-36766"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2020-36775"
+
+# fixed-version: only affects 5.8rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36776"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2020-36777"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36778"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36779"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2020-36780"
+
+# CVE-2020-36781 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2020-36782"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2020-36783"
+
+# CVE-2020-36784 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36785"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-36786"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2020-36787"
+
+# cpe-stable-backport: Backported in 5.4.143
+CVE_CHECK_WHITELIST += "CVE-2020-3702"
+
+# cpe-stable-backport: Backported in 5.4.79
+CVE_CHECK_WHITELIST += "CVE-2020-4788"
+
+# fixed-version: Fixed after version 5.2rc1
+CVE_CHECK_WHITELIST += "CVE-2020-7053"
+
+# cpe-stable-backport: Backported in 5.4.16
+CVE_CHECK_WHITELIST += "CVE-2020-8428"
+
+# cpe-stable-backport: Backported in 5.4.25
+CVE_CHECK_WHITELIST += "CVE-2020-8647"
+
+# cpe-stable-backport: Backported in 5.4.25
+CVE_CHECK_WHITELIST += "CVE-2020-8648"
+
+# cpe-stable-backport: Backported in 5.4.25
+CVE_CHECK_WHITELIST += "CVE-2020-8649"
+
+# cpe-stable-backport: Backported in 5.4.77
+CVE_CHECK_WHITELIST += "CVE-2020-8694"
+
+# CVE-2020-8832 has no known resolution
+
+# fixed-version: Fixed after version 4.18rc1
+CVE_CHECK_WHITELIST += "CVE-2020-8834"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2020-8835"
+
+# cpe-stable-backport: Backported in 5.4.21
+CVE_CHECK_WHITELIST += "CVE-2020-8992"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-9383"
+
+# cpe-stable-backport: Backported in 5.4.23
+CVE_CHECK_WHITELIST += "CVE-2020-9391"
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2021-0129"
+
+# cpe-stable-backport: Backported in 5.4.47
+CVE_CHECK_WHITELIST += "CVE-2021-0342"
+
+# CVE-2021-0399 has no known resolution
+
+# fixed-version: Fixed after version 4.15rc1
+CVE_CHECK_WHITELIST += "CVE-2021-0447"
+
+# cpe-stable-backport: Backported in 5.4.70
+CVE_CHECK_WHITELIST += "CVE-2021-0448"
+
+# cpe-stable-backport: Backported in 5.4.101
+CVE_CHECK_WHITELIST += "CVE-2021-0512"
+
+# cpe-stable-backport: Backported in 5.4.68
+CVE_CHECK_WHITELIST += "CVE-2021-0605"
+
+# CVE-2021-0606 has no known resolution
+
+# CVE-2021-0695 has no known resolution
+
+# fixed-version: only affects 5.8rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-0707"
+
+# cpe-stable-backport: Backported in 5.4.137
+CVE_CHECK_WHITELIST += "CVE-2021-0920"
+
+# CVE-2021-0924 has no known resolution
+
+# CVE-2021-0929 needs backporting (fixed from 5.6rc1)
+
+# fixed-version: Fixed after version 4.16rc7
+CVE_CHECK_WHITELIST += "CVE-2021-0935"
+
+# CVE-2021-0936 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.113
+CVE_CHECK_WHITELIST += "CVE-2021-0937"
+
+# cpe-stable-backport: Backported in 5.4.84
+CVE_CHECK_WHITELIST += "CVE-2021-0938"
+
+# cpe-stable-backport: Backported in 5.4.110
+CVE_CHECK_WHITELIST += "CVE-2021-0941"
+
+# CVE-2021-0961 has no known resolution
+
+# fixed-version: only affects 5.9rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-1048"
+
+# CVE-2021-20177 needs backporting (fixed from 5.5rc1)
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-20194"
+
+# CVE-2021-20219 has no known resolution
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-20226"
+
+# CVE-2021-20239 needs backporting (fixed from 5.9rc1)
+
+# fixed-version: Fixed after version 4.5rc5
+CVE_CHECK_WHITELIST += "CVE-2021-20261"
+
+# fixed-version: Fixed after version 4.5rc3
+CVE_CHECK_WHITELIST += "CVE-2021-20265"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-20268"
+
+# cpe-stable-backport: Backported in 5.4.59
+CVE_CHECK_WHITELIST += "CVE-2021-20292"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2021-20317"
+
+# cpe-stable-backport: Backported in 5.4.148
+CVE_CHECK_WHITELIST += "CVE-2021-20320"
+
+# cpe-stable-backport: Backported in 5.4.153
+CVE_CHECK_WHITELIST += "CVE-2021-20321"
+
+# cpe-stable-backport: Backported in 5.4.146
+CVE_CHECK_WHITELIST += "CVE-2021-20322"
+
+# cpe-stable-backport: Backported in 5.4.99
+CVE_CHECK_WHITELIST += "CVE-2021-21781"
+
+# cpe-stable-backport: Backported in 5.4.129
+CVE_CHECK_WHITELIST += "CVE-2021-22543"
+
+# cpe-stable-backport: Backported in 5.4.113
+CVE_CHECK_WHITELIST += "CVE-2021-22555"
+
+# fixed-version: only affects 5.6 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-22600"
+
+# cpe-stable-backport: Backported in 5.4.114
+CVE_CHECK_WHITELIST += "CVE-2021-23133"
+
+# fixed-version: only affects 5.12rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-23134"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2021-26401"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-26708"
+
+# cpe-stable-backport: Backported in 5.4.100
+CVE_CHECK_WHITELIST += "CVE-2021-26930"
+
+# cpe-stable-backport: Backported in 5.4.100
+CVE_CHECK_WHITELIST += "CVE-2021-26931"
+
+# cpe-stable-backport: Backported in 5.4.100
+CVE_CHECK_WHITELIST += "CVE-2021-26932"
+
+# CVE-2021-26934 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-27363"
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-27364"
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-27365"
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-28038"
+
+# fixed-version: only affects 5.9rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28039"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-28375"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-28660"
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-28688"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28691"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28711"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28712"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28713"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28714"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-28715"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28950"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28951"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-28952"
+
+# cpe-stable-backport: Backported in 5.4.108
+CVE_CHECK_WHITELIST += "CVE-2021-28964"
+
+# cpe-stable-backport: Backported in 5.4.108
+CVE_CHECK_WHITELIST += "CVE-2021-28971"
+
+# cpe-stable-backport: Backported in 5.4.108
+CVE_CHECK_WHITELIST += "CVE-2021-28972"
+
+# cpe-stable-backport: Backported in 5.4.111
+CVE_CHECK_WHITELIST += "CVE-2021-29154"
+
+# CVE-2021-29155 needs backporting (fixed from 5.12rc8)
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-29264"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-29265"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29266"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29646"
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-29647"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29648"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29649"
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-29650"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-29657"
+
+# cpe-stable-backport: Backported in 5.4.103
+CVE_CHECK_WHITELIST += "CVE-2021-30002"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-30178"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-31440"
+
+# cpe-stable-backport: Backported in 5.4.92
+CVE_CHECK_WHITELIST += "CVE-2021-3178"
+
+# cpe-stable-backport: Backported in 5.4.117
+CVE_CHECK_WHITELIST += "CVE-2021-31829"
+
+# cpe-stable-backport: Backported in 5.4.109
+CVE_CHECK_WHITELIST += "CVE-2021-31916"
+
+# CVE-2021-32078 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-32399"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-32606"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-33033"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-33034"
+
+# CVE-2021-33061 needs backporting (fixed from 5.18rc1)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-33098"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-33135"
+
+# fixed-version: only affects 5.12rc8 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-33200"
+
+# cpe-stable-backport: Backported in 5.4.94
+CVE_CHECK_WHITELIST += "CVE-2021-3347"
+
+# cpe-stable-backport: Backported in 5.4.95
+CVE_CHECK_WHITELIST += "CVE-2021-3348"
+
+# cpe-stable-backport: Backported in 5.4.139
+CVE_CHECK_WHITELIST += "CVE-2021-33624"
+
+# fixed-version: Fixed after version 5.4rc1
+CVE_CHECK_WHITELIST += "CVE-2021-33630"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2021-33631"
+
+# cpe-stable-backport: Backported in 5.4.205
+CVE_CHECK_WHITELIST += "CVE-2021-33655"
+
+# cpe-stable-backport: Backported in 5.4.202
+CVE_CHECK_WHITELIST += "CVE-2021-33656"
+
+# cpe-stable-backport: Backported in 5.4.134
+CVE_CHECK_WHITELIST += "CVE-2021-33909"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3411"
+
+# cpe-stable-backport: Backported in 5.4.62
+CVE_CHECK_WHITELIST += "CVE-2021-3428"
+
+# cpe-stable-backport: Backported in 5.4.101
+CVE_CHECK_WHITELIST += "CVE-2021-3444"
+
+# cpe-stable-backport: Backported in 5.4.146
+CVE_CHECK_WHITELIST += "CVE-2021-34556"
+
+# cpe-stable-backport: Backported in 5.4.128
+CVE_CHECK_WHITELIST += "CVE-2021-34693"
+
+# cpe-stable-backport: Backported in 5.4.110
+CVE_CHECK_WHITELIST += "CVE-2021-3483"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-34866"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3489"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3490"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3491"
+
+# CVE-2021-3492 has no known resolution
+
+# CVE-2021-3493 needs backporting (fixed from 5.11rc1)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-34981"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3501"
+
+# cpe-stable-backport: Backported in 5.4.129
+CVE_CHECK_WHITELIST += "CVE-2021-35039"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-3506"
+
+# CVE-2021-3542 has no known resolution
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3543"
+
+# cpe-stable-backport: Backported in 5.4.146
+CVE_CHECK_WHITELIST += "CVE-2021-35477"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-3564"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-3573"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-3587"
+
+# cpe-stable-backport: Backported in 5.4.98
+CVE_CHECK_WHITELIST += "CVE-2021-3600"
+
+# cpe-stable-backport: Backported in 5.4.132
+CVE_CHECK_WHITELIST += "CVE-2021-3609"
+
+# cpe-stable-backport: Backported in 5.4.102
+CVE_CHECK_WHITELIST += "CVE-2021-3612"
+
+# cpe-stable-backport: Backported in 5.4.14
+CVE_CHECK_WHITELIST += "CVE-2021-3635"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2021-3640"
+
+# cpe-stable-backport: Backported in 5.4.142
+CVE_CHECK_WHITELIST += "CVE-2021-3653"
+
+# cpe-stable-backport: Backported in 5.4.133
+CVE_CHECK_WHITELIST += "CVE-2021-3655"
+
+# cpe-stable-backport: Backported in 5.4.142
+CVE_CHECK_WHITELIST += "CVE-2021-3656"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2021-3659"
+
+# CVE-2021-3669 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.4.136
+CVE_CHECK_WHITELIST += "CVE-2021-3679"
+
+# CVE-2021-3714 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.29
+CVE_CHECK_WHITELIST += "CVE-2021-3715"
+
+# cpe-stable-backport: Backported in 5.4.151
+CVE_CHECK_WHITELIST += "CVE-2021-37159"
+
+# cpe-stable-backport: Backported in 5.4.141
+CVE_CHECK_WHITELIST += "CVE-2021-3732"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-3736"
+
+# cpe-stable-backport: Backported in 5.4.144
+CVE_CHECK_WHITELIST += "CVE-2021-3739"
+
+# cpe-stable-backport: Backported in 5.4.128
+CVE_CHECK_WHITELIST += "CVE-2021-3743"
+
+# cpe-stable-backport: Backported in 5.4.151
+CVE_CHECK_WHITELIST += "CVE-2021-3744"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2021-3752"
+
+# cpe-stable-backport: Backported in 5.4.144
+CVE_CHECK_WHITELIST += "CVE-2021-3753"
+
+# cpe-stable-backport: Backported in 5.4.136
+CVE_CHECK_WHITELIST += "CVE-2021-37576"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2021-3759"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2021-3760"
+
+# cpe-stable-backport: Backported in 5.4.151
+CVE_CHECK_WHITELIST += "CVE-2021-3764"
+
+# cpe-stable-backport: Backported in 5.4.157
+CVE_CHECK_WHITELIST += "CVE-2021-3772"
+
+# cpe-stable-backport: Backported in 5.4.134
+CVE_CHECK_WHITELIST += "CVE-2021-38160"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38166"
+
+# cpe-stable-backport: Backported in 5.4.141
+CVE_CHECK_WHITELIST += "CVE-2021-38198"
+
+# cpe-stable-backport: Backported in 5.4.134
+CVE_CHECK_WHITELIST += "CVE-2021-38199"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38200"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38201"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38202"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38203"
+
+# cpe-stable-backport: Backported in 5.4.136
+CVE_CHECK_WHITELIST += "CVE-2021-38204"
+
+# cpe-stable-backport: Backported in 5.4.141
+CVE_CHECK_WHITELIST += "CVE-2021-38205"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38206"
+
+# fixed-version: only affects 5.6rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38207"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-38208"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-38209"
+
+# cpe-stable-backport: Backported in 5.4.153
+CVE_CHECK_WHITELIST += "CVE-2021-38300"
+
+# CVE-2021-3847 has no known resolution
+
+# CVE-2021-3864 has no known resolution
+
+# CVE-2021-3892 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.155
+CVE_CHECK_WHITELIST += "CVE-2021-3894"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2021-3896"
+
+# cpe-stable-backport: Backported in 5.4.171
+CVE_CHECK_WHITELIST += "CVE-2021-3923"
+
+# cpe-stable-backport: Backported in 5.4.144
+CVE_CHECK_WHITELIST += "CVE-2021-39633"
+
+# cpe-stable-backport: Backported in 5.4.70
+CVE_CHECK_WHITELIST += "CVE-2021-39634"
+
+# fixed-version: Fixed after version 4.16rc1
+CVE_CHECK_WHITELIST += "CVE-2021-39636"
+
+# cpe-stable-backport: Backported in 5.4.89
+CVE_CHECK_WHITELIST += "CVE-2021-39648"
+
+# cpe-stable-backport: Backported in 5.4.106
+CVE_CHECK_WHITELIST += "CVE-2021-39656"
+
+# cpe-stable-backport: Backported in 5.4.93
+CVE_CHECK_WHITELIST += "CVE-2021-39657"
+
+# cpe-stable-backport: Backported in 5.4.165
+CVE_CHECK_WHITELIST += "CVE-2021-39685"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2021-39686"
+
+# cpe-stable-backport: Backported in 5.4.165
+CVE_CHECK_WHITELIST += "CVE-2021-39698"
+
+# fixed-version: Fixed after version 4.18rc6
+CVE_CHECK_WHITELIST += "CVE-2021-39711"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2021-39713"
+
+# fixed-version: Fixed after version 4.12rc1
+CVE_CHECK_WHITELIST += "CVE-2021-39714"
+
+# CVE-2021-39800 has no known resolution
+
+# CVE-2021-39801 has no known resolution
+
+# CVE-2021-39802 has no known resolution
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4001"
+
+# cpe-stable-backport: Backported in 5.4.162
+CVE_CHECK_WHITELIST += "CVE-2021-4002"
+
+# CVE-2021-4023 needs backporting (fixed from 5.15rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4028"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4032"
+
+# cpe-stable-backport: Backported in 5.4.241
+CVE_CHECK_WHITELIST += "CVE-2021-4037"
+
+# cpe-stable-backport: Backported in 5.4.145
+CVE_CHECK_WHITELIST += "CVE-2021-40490"
+
+# cpe-stable-backport: Backported in 5.4.164
+CVE_CHECK_WHITELIST += "CVE-2021-4083"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4090"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4093"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4095"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-41073"
+
+# cpe-stable-backport: Backported in 5.4.168
+CVE_CHECK_WHITELIST += "CVE-2021-4135"
+
+# CVE-2021-4148 needs backporting (fixed from 5.15)
+
+# cpe-stable-backport: Backported in 5.4.155
+CVE_CHECK_WHITELIST += "CVE-2021-4149"
+
+# CVE-2021-4150 needs backporting (fixed from 5.15rc7)
+
+# cpe-stable-backport: Backported in 5.4.134
+CVE_CHECK_WHITELIST += "CVE-2021-4154"
+
+# cpe-stable-backport: Backported in 5.4.171
+CVE_CHECK_WHITELIST += "CVE-2021-4155"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-4157"
+
+# cpe-stable-backport: Backported in 5.4.210
+CVE_CHECK_WHITELIST += "CVE-2021-4159"
+
+# cpe-stable-backport: Backported in 5.4.153
+CVE_CHECK_WHITELIST += "CVE-2021-41864"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2021-4197"
+
+# cpe-stable-backport: Backported in 5.4.143
+CVE_CHECK_WHITELIST += "CVE-2021-42008"
+
+# cpe-stable-backport: Backported in 5.4.162
+CVE_CHECK_WHITELIST += "CVE-2021-4202"
+
+# cpe-stable-backport: Backported in 5.4.151
+CVE_CHECK_WHITELIST += "CVE-2021-4203"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-4204"
+
+# CVE-2021-4218 needs backporting (fixed from 5.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.148
+CVE_CHECK_WHITELIST += "CVE-2021-42252"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-42327"
+
+# cpe-stable-backport: Backported in 5.4.158
+CVE_CHECK_WHITELIST += "CVE-2021-42739"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2021-43056"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-43057"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-43267"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2021-43389"
+
+# cpe-stable-backport: Backported in 5.4.164
+CVE_CHECK_WHITELIST += "CVE-2021-43975"
+
+# cpe-stable-backport: Backported in 5.4.174
+CVE_CHECK_WHITELIST += "CVE-2021-43976"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-44733"
+
+# cpe-stable-backport: Backported in 5.4.260
+CVE_CHECK_WHITELIST += "CVE-2021-44879"
+
+# cpe-stable-backport: Backported in 5.4.171
+CVE_CHECK_WHITELIST += "CVE-2021-45095"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-45100"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-45402"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-45469"
+
+# fixed-version: only affects 5.13rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-45480"
+
+# cpe-stable-backport: Backported in 5.4.133
+CVE_CHECK_WHITELIST += "CVE-2021-45485"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-45486"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2021-45868"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46283"
+
+# cpe-stable-backport: Backported in 5.4.112
+CVE_CHECK_WHITELIST += "CVE-2021-46904"
+
+# fixed-version: only affects 5.12rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46905"
+
+# cpe-stable-backport: Backported in 5.4.127
+CVE_CHECK_WHITELIST += "CVE-2021-46906"
+
+# CVE-2021-46908 needs backporting (fixed from 5.12rc8)
+
+# cpe-stable-backport: Backported in 5.4.114
+CVE_CHECK_WHITELIST += "CVE-2021-46909"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46910"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46911"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46912"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46913"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46914"
+
+# cpe-stable-backport: Backported in 5.4.114
+CVE_CHECK_WHITELIST += "CVE-2021-46915"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46916"
+
+# fixed-version: only affects 5.8rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46917"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46918"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46919"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46920"
+
+# cpe-stable-backport: Backported in 5.4.115
+CVE_CHECK_WHITELIST += "CVE-2021-46921"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46922"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46923"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46924"
+
+# CVE-2021-46925 needs backporting (fixed from 5.16rc8)
+
+# CVE-2021-46926 needs backporting (fixed from 5.16rc7)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46927"
+
+# CVE-2021-46928 needs backporting (fixed from 5.16rc7)
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46929"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46930"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46931"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46932"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46933"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46934"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46935"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2021-46936"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46937"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46938"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46939"
+
+# fixed-version: only affects 5.10rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46940"
+
+# CVE-2021-46941 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46942"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46943"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46944"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46945"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46947"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46948"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46949"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46950"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46951"
+
+# CVE-2021-46952 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46953"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46954"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46955"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46956"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46957"
+
+# fixed-version: only affects 5.7rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46958"
+
+# CVE-2021-46959 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46960"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46961"
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46962"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46963"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46964"
+
+# CVE-2021-46965 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.118
+CVE_CHECK_WHITELIST += "CVE-2021-46966"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46967"
+
+# fixed-version: only affects 5.10rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46968"
+
+# CVE-2021-46969 needs backporting (fixed from 5.13rc1)
+
+# CVE-2021-46970 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.117
+CVE_CHECK_WHITELIST += "CVE-2021-46971"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46972"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46973"
+
+# cpe-stable-backport: Backported in 5.4.117
+CVE_CHECK_WHITELIST += "CVE-2021-46974"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46976"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46977"
+
+# fixed-version: only affects 5.11rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46978"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46979"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46980"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46981"
+
+# CVE-2021-46982 needs backporting (fixed from 5.13rc2)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46983"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46984"
+
+# fixed-version: only affects 5.12rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46985"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46986"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46987"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46988"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46989"
+
+# fixed-version: only affects 5.10rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46990"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46991"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46992"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46993"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46994"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46995"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46996"
+
+# fixed-version: only affects 5.10rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46997"
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-46998"
+
+# fixed-version: only affects 5.7rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-46999"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47000"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47001"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47002"
+
+# fixed-version: only affects 5.11 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47003"
+
+# CVE-2021-47004 needs backporting (fixed from 5.13rc1)
+
+# CVE-2021-47005 needs backporting (fixed from 5.13rc1)
+
+# cpe-stable-backport: Backported in 5.4.120
+CVE_CHECK_WHITELIST += "CVE-2021-47006"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47007"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47008"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47009"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47010"
+
+# fixed-version: only affects 5.11rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47011"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47012"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47013"
+
+# fixed-version: only affects 5.8rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47014"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47015"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47016"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47017"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47018"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47019"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47020"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47021"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47022"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47023"
+
+# CVE-2021-47024 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47025"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47026"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47027"
+
+# CVE-2021-47028 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47029"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47030"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47031"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47032"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47033"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47034"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47035"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47036"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47037"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47038"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47039"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47040"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47041"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47042"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47043"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47044"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47045"
+
+# CVE-2021-47046 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47047"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47048"
+
+# CVE-2021-47049 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47050"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47051"
+
+# CVE-2021-47052 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47053"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47054"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47055"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47056"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47057"
+
+# fixed-version: only affects 5.11rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47058"
+
+# CVE-2021-47059 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.9rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47060"
+
+# fixed-version: only affects 5.9rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47061"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47062"
+
+# CVE-2021-47063 needs backporting (fixed from 5.13rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47064"
+
+# cpe-stable-backport: Backported in 5.4.119
+CVE_CHECK_WHITELIST += "CVE-2021-47065"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47066"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47067"
+
+# fixed-version: only affects 5.12rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47068"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47069"
+
+# CVE-2021-47070 needs backporting (fixed from 5.13rc3)
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2021-47071"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47072"
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2021-47073"
+
+# CVE-2021-47074 needs backporting (fixed from 5.13rc3)
+
+# CVE-2021-47075 needs backporting (fixed from 5.13rc3)
+
+# CVE-2021-47076 needs backporting (fixed from 5.13rc3)
+
+# CVE-2021-47077 needs backporting (fixed from 5.13rc3)
+
+# cpe-stable-backport: Backported in 5.4.122
+CVE_CHECK_WHITELIST += "CVE-2021-47078"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47079"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47080"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47081"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2021-47082"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-47083"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-47086"
+
+# fixed-version: only affects 5.14rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47087"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47088"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47089"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47090"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47091"
+
+# fixed-version: only affects 5.15rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47092"
+
+# fixed-version: only affects 5.9 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47093"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47094"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-47095"
+
+# fixed-version: only affects 5.15rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47096"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47097"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47098"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47099"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2021-47100"
+
+# CVE-2021-47101 needs backporting (fixed from 5.16rc7)
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47102"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2021-47103"
+
+# fixed-version: only affects 5.15 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47104"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47105"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47106"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47107"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47108"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47109"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47110"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47111"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47112"
+
+# CVE-2021-47113 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47114"
+
+# CVE-2021-47116 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47117"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47118"
+
+# CVE-2021-47119 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47120"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47121"
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47122"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47123"
+
+# CVE-2021-47124 needs backporting (fixed from 5.13rc2)
+
+# CVE-2021-47125 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47126"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47127"
+
+# CVE-2021-47128 needs backporting (fixed from 5.13rc5)
+
+# cpe-stable-backport: Backported in 5.4.125
+CVE_CHECK_WHITELIST += "CVE-2021-47129"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47130"
+
+# CVE-2021-47131 needs backporting (fixed from 5.13rc5)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47132"
+
+# CVE-2021-47133 needs backporting (fixed from 5.13rc5)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47134"
+
+# CVE-2021-47135 needs backporting (fixed from 5.13rc5)
+
+# CVE-2021-47136 needs backporting (fixed from 5.13rc4)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47137"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47138"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47139"
+
+# CVE-2021-47140 needs backporting (fixed from 5.13rc4)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47141"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47142"
+
+# CVE-2021-47143 needs backporting (fixed from 5.13rc4)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47144"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47145"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47146"
+
+# CVE-2021-47147 needs backporting (fixed from 5.13rc4)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47148"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47149"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47150"
+
+# CVE-2021-47151 needs backporting (fixed from 5.13rc4)
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47152"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47153"
+
+# CVE-2021-47158 needs backporting (fixed from 5.13rc4)
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47159"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47160"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47161"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47162"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47163"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47164"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47165"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47166"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47167"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47168"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47169"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47170"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47171"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47172"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47173"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47174"
+
+# CVE-2021-47175 needs backporting (fixed from 5.13rc4)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47176"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47177"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2021-47178"
+
+# cpe-stable-backport: Backported in 5.4.124
+CVE_CHECK_WHITELIST += "CVE-2021-47179"
+
+# cpe-stable-backport: Backported in 5.4.123
+CVE_CHECK_WHITELIST += "CVE-2021-47180"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-0001"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-0002"
+
+# CVE-2022-0168 needs backporting (fixed from 5.18rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0171"
+
+# cpe-stable-backport: Backported in 5.4.173
+CVE_CHECK_WHITELIST += "CVE-2022-0185"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0264"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0286"
+
+# cpe-stable-backport: Backported in 5.4.155
+CVE_CHECK_WHITELIST += "CVE-2022-0322"
+
+# cpe-stable-backport: Backported in 5.4.175
+CVE_CHECK_WHITELIST += "CVE-2022-0330"
+
+# CVE-2022-0382 needs backporting (fixed from 5.16)
+
+# CVE-2022-0400 has no known resolution
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0433"
+
+# cpe-stable-backport: Backported in 5.4.179
+CVE_CHECK_WHITELIST += "CVE-2022-0435"
+
+# CVE-2022-0480 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.4.179
+CVE_CHECK_WHITELIST += "CVE-2022-0487"
+
+# cpe-stable-backport: Backported in 5.4.177
+CVE_CHECK_WHITELIST += "CVE-2022-0492"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-0494"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0500"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0516"
+
+# cpe-stable-backport: Backported in 5.4.176
+CVE_CHECK_WHITELIST += "CVE-2022-0617"
+
+# cpe-stable-backport: Backported in 5.4.156
+CVE_CHECK_WHITELIST += "CVE-2022-0644"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0646"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0742"
+
+# cpe-stable-backport: Backported in 5.4.53
+CVE_CHECK_WHITELIST += "CVE-2022-0812"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0847"
+
+# cpe-stable-backport: Backported in 5.4.132
+CVE_CHECK_WHITELIST += "CVE-2022-0850"
+
+# fixed-version: only affects 5.17rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0854"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0995"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-0998"
+
+# cpe-stable-backport: Backported in 5.4.185
+CVE_CHECK_WHITELIST += "CVE-2022-1011"
+
+# cpe-stable-backport: Backported in 5.4.197
+CVE_CHECK_WHITELIST += "CVE-2022-1012"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1015"
+
+# cpe-stable-backport: Backported in 5.4.188
+CVE_CHECK_WHITELIST += "CVE-2022-1016"
+
+# fixed-version: only affects 5.12rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1043"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-1048"
+
+# cpe-stable-backport: Backported in 5.4.177
+CVE_CHECK_WHITELIST += "CVE-2022-1055"
+
+# CVE-2022-1116 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-1158"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-1184"
+
+# cpe-stable-backport: Backported in 5.4.169
+CVE_CHECK_WHITELIST += "CVE-2022-1195"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-1198"
+
+# cpe-stable-backport: Backported in 5.4.185
+CVE_CHECK_WHITELIST += "CVE-2022-1199"
+
+# cpe-stable-backport: Backported in 5.4.190
+CVE_CHECK_WHITELIST += "CVE-2022-1204"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1205"
+
+# CVE-2022-1247 has no known resolution
+
+# CVE-2022-1263 needs backporting (fixed from 5.18rc3)
+
+# CVE-2022-1280 needs backporting (fixed from 5.15rc1)
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-1353"
+
+# cpe-stable-backport: Backported in 5.4.21
+CVE_CHECK_WHITELIST += "CVE-2022-1419"
+
+# cpe-stable-backport: Backported in 5.4.208
+CVE_CHECK_WHITELIST += "CVE-2022-1462"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1508"
+
+# fixed-version: only affects 5.7rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1516"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1651"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2022-1652"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1671"
+
+# fixed-version: Fixed after version 4.20rc1
+CVE_CHECK_WHITELIST += "CVE-2022-1678"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-1679"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2022-1729"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-1734"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1786"
+
+# CVE-2022-1789 needs backporting (fixed from 5.18)
+
+# cpe-stable-backport: Backported in 5.4.192
+CVE_CHECK_WHITELIST += "CVE-2022-1836"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1852"
+
+# fixed-version: only affects 5.17rc8 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1882"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1943"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-1966"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1972"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1973"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-1974"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2022-1975"
+
+# fixed-version: only affects 5.18rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1976"
+
+# fixed-version: only affects 5.13rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-1998"
+
+# cpe-stable-backport: Backported in 5.4.181
+CVE_CHECK_WHITELIST += "CVE-2022-20008"
+
+# cpe-stable-backport: Backported in 5.4.165
+CVE_CHECK_WHITELIST += "CVE-2022-20132"
+
+# cpe-stable-backport: Backported in 5.4.145
+CVE_CHECK_WHITELIST += "CVE-2022-20141"
+
+# CVE-2022-20148 needs backporting (fixed from 5.16rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-20153"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2022-20154"
+
+# cpe-stable-backport: Backported in 5.4.187
+CVE_CHECK_WHITELIST += "CVE-2022-20158"
+
+# CVE-2022-20166 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.187
+CVE_CHECK_WHITELIST += "CVE-2022-20368"
+
+# cpe-stable-backport: Backported in 5.4.210
+CVE_CHECK_WHITELIST += "CVE-2022-20369"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-20409"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-20421"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-20422"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-20423"
+
+# CVE-2022-20424 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.4.63
+CVE_CHECK_WHITELIST += "CVE-2022-20565"
+
+# cpe-stable-backport: Backported in 5.4.209
+CVE_CHECK_WHITELIST += "CVE-2022-20566"
+
+# fixed-version: Fixed after version 4.16rc5
+CVE_CHECK_WHITELIST += "CVE-2022-20567"
+
+# fixed-version: only affects 5.7rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-20568"
+
+# cpe-stable-backport: Backported in 5.4.197
+CVE_CHECK_WHITELIST += "CVE-2022-20572"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2078"
+
+# cpe-stable-backport: Backported in 5.4.199
+CVE_CHECK_WHITELIST += "CVE-2022-21123"
+
+# cpe-stable-backport: Backported in 5.4.199
+CVE_CHECK_WHITELIST += "CVE-2022-21125"
+
+# cpe-stable-backport: Backported in 5.4.199
+CVE_CHECK_WHITELIST += "CVE-2022-21166"
+
+# fixed-version: Fixed after version 4.20
+CVE_CHECK_WHITELIST += "CVE-2022-21385"
+
+# cpe-stable-backport: Backported in 5.4.197
+CVE_CHECK_WHITELIST += "CVE-2022-21499"
+
+# cpe-stable-backport: Backported in 5.4.208
+CVE_CHECK_WHITELIST += "CVE-2022-21505"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-2153"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2196"
+
+# CVE-2022-2209 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.175
+CVE_CHECK_WHITELIST += "CVE-2022-22942"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23036"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23037"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23038"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23039"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23040"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23041"
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23042"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2308"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-2318"
+
+# CVE-2022-23222 needs backporting (fixed from 5.17rc1)
+
+# CVE-2022-2327 needs backporting (fixed from 5.12rc1)
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-2380"
+
+# cpe-stable-backport: Backported in 5.4.217
+CVE_CHECK_WHITELIST += "CVE-2022-23816"
+
+# CVE-2022-23825 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.184
+CVE_CHECK_WHITELIST += "CVE-2022-23960"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-24122"
+
+# cpe-stable-backport: Backported in 5.4.176
+CVE_CHECK_WHITELIST += "CVE-2022-24448"
+
+# cpe-stable-backport: Backported in 5.4.183
+CVE_CHECK_WHITELIST += "CVE-2022-24958"
+
+# cpe-stable-backport: Backported in 5.4.176
+CVE_CHECK_WHITELIST += "CVE-2022-24959"
+
+# cpe-stable-backport: Backported in 5.4.197
+CVE_CHECK_WHITELIST += "CVE-2022-2503"
+
+# cpe-stable-backport: Backported in 5.4.180
+CVE_CHECK_WHITELIST += "CVE-2022-25258"
+
+# CVE-2022-25265 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.180
+CVE_CHECK_WHITELIST += "CVE-2022-25375"
+
+# cpe-stable-backport: Backported in 5.4.182
+CVE_CHECK_WHITELIST += "CVE-2022-25636"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2585"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-2586"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-2588"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2590"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-2602"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-26365"
+
+# cpe-stable-backport: Backported in 5.4.210
+CVE_CHECK_WHITELIST += "CVE-2022-26373"
+
+# cpe-stable-backport: Backported in 5.4.191
+CVE_CHECK_WHITELIST += "CVE-2022-2639"
+
+# cpe-stable-backport: Backported in 5.4.188
+CVE_CHECK_WHITELIST += "CVE-2022-26490"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-2663"
+
+# CVE-2022-26878 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.182
+CVE_CHECK_WHITELIST += "CVE-2022-26966"
+
+# cpe-stable-backport: Backported in 5.4.182
+CVE_CHECK_WHITELIST += "CVE-2022-27223"
+
+# cpe-stable-backport: Backported in 5.4.188
+CVE_CHECK_WHITELIST += "CVE-2022-27666"
+
+# CVE-2022-27672 needs backporting (fixed from 6.2)
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2785"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-27950"
+
+# cpe-stable-backport: Backported in 5.4.188
+CVE_CHECK_WHITELIST += "CVE-2022-28356"
+
+# cpe-stable-backport: Backported in 5.4.191
+CVE_CHECK_WHITELIST += "CVE-2022-28388"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-28389"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-28390"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2873"
+
+# fixed-version: only affects 5.17rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-28796"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2022-28893"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2905"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-29156"
+
+# cpe-stable-backport: Backported in 5.4.177
+CVE_CHECK_WHITELIST += "CVE-2022-2938"
+
+# cpe-stable-backport: Backported in 5.4.191
+CVE_CHECK_WHITELIST += "CVE-2022-29581"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-29582"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-2959"
+
+# CVE-2022-2961 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.180
+CVE_CHECK_WHITELIST += "CVE-2022-2964"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-2977"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-2978"
+
+# cpe-stable-backport: Backported in 5.4.217
+CVE_CHECK_WHITELIST += "CVE-2022-29900"
+
+# cpe-stable-backport: Backported in 5.4.217
+CVE_CHECK_WHITELIST += "CVE-2022-29901"
+
+# CVE-2022-2991 needs backporting (fixed from 5.15rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-29968"
+
+# cpe-stable-backport: Backported in 5.4.212
+CVE_CHECK_WHITELIST += "CVE-2022-3028"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-30594"
+
+# CVE-2022-3061 needs backporting (fixed from 5.18rc5)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3077"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3078"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3103"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3104"
+
+# cpe-stable-backport: Backported in 5.4.171
+CVE_CHECK_WHITELIST += "CVE-2022-3105"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3106"
+
+# cpe-stable-backport: Backported in 5.4.187
+CVE_CHECK_WHITELIST += "CVE-2022-3107"
+
+# CVE-2022-3108 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3110"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-3111"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3112"
+
+# fixed-version: only affects 5.10rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3113"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3114"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-3115"
+
+# cpe-stable-backport: Backported in 5.4.226
+CVE_CHECK_WHITELIST += "CVE-2022-3169"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3170"
+
+# CVE-2022-3176 needs backporting (fixed from 5.17rc1)
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-3202"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-32250"
+
+# cpe-stable-backport: Backported in 5.4.201
+CVE_CHECK_WHITELIST += "CVE-2022-32296"
+
+# CVE-2022-3238 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2022-3239"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-32981"
+
+# cpe-stable-backport: Backported in 5.4.215
+CVE_CHECK_WHITELIST += "CVE-2022-3303"
+
+# CVE-2022-3344 needs backporting (fixed from 6.1rc7)
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-33740"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-33741"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-33742"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-33743"
+
+# cpe-stable-backport: Backported in 5.4.204
+CVE_CHECK_WHITELIST += "CVE-2022-33744"
+
+# cpe-stable-backport: Backported in 5.4.192
+CVE_CHECK_WHITELIST += "CVE-2022-33981"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-3424"
+
+# fixed-version: only affects 5.18rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3435"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-34494"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-34495"
+
+# cpe-stable-backport: Backported in 5.4.244
+CVE_CHECK_WHITELIST += "CVE-2022-34918"
+
+# cpe-stable-backport: Backported in 5.4.225
+CVE_CHECK_WHITELIST += "CVE-2022-3521"
+
+# CVE-2022-3522 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3523 needs backporting (fixed from 6.1rc1)
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2022-3524"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3526"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3531"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3532"
+
+# CVE-2022-3533 has no known resolution
+
+# CVE-2022-3534 needs backporting (fixed from 6.2rc1)
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3535"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3541"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3542"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3543"
+
+# CVE-2022-3544 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.228
+CVE_CHECK_WHITELIST += "CVE-2022-3545"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2022-3564"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3565"
+
+# CVE-2022-3566 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3567 needs backporting (fixed from 6.1rc1)
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2022-3577"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-3586"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3594"
+
+# CVE-2022-3595 needs backporting (fixed from 6.1rc1)
+
+# CVE-2022-3606 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.207
+CVE_CHECK_WHITELIST += "CVE-2022-36123"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3619"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-3621"
+
+# cpe-stable-backport: Backported in 5.4.228
+CVE_CHECK_WHITELIST += "CVE-2022-3623"
+
+# CVE-2022-3624 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-3625"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2022-3628"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-36280"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-3629"
+
+# fixed-version: only affects 5.19rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3630"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-3633"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-3635"
+
+# CVE-2022-3636 needs backporting (fixed from 5.19rc1)
+
+# fixed-version: only affects 5.19 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3640"
+
+# CVE-2022-36402 needs backporting (fixed from 6.5)
+
+# CVE-2022-3642 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.227
+CVE_CHECK_WHITELIST += "CVE-2022-3643"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-3646"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-3649"
+
+# cpe-stable-backport: Backported in 5.4.208
+CVE_CHECK_WHITELIST += "CVE-2022-36879"
+
+# cpe-stable-backport: Backported in 5.4.209
+CVE_CHECK_WHITELIST += "CVE-2022-36946"
+
+# cpe-stable-backport: Backported in 5.4.233
+CVE_CHECK_WHITELIST += "CVE-2022-3707"
+
+# CVE-2022-38096 has no known resolution
+
+# CVE-2022-38457 needs backporting (fixed from 6.2rc4)
+
+# CVE-2022-3903 needs backporting (fixed from 6.1rc2)
+
+# fixed-version: only affects 5.18 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3910"
+
+# CVE-2022-39188 needs backporting (fixed from 5.19rc8)
+
+# cpe-stable-backport: Backported in 5.4.244
+CVE_CHECK_WHITELIST += "CVE-2022-39189"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-39190"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-3977"
+
+# cpe-stable-backport: Backported in 5.4.215
+CVE_CHECK_WHITELIST += "CVE-2022-39842"
+
+# CVE-2022-40133 needs backporting (fixed from 6.2rc4)
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-40307"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-40476"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-40768"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-4095"
+
+# cpe-stable-backport: Backported in 5.4.252
+CVE_CHECK_WHITELIST += "CVE-2022-40982"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-41218"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2022-41222"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4127"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4128"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2022-4129"
+
+# fixed-version: only affects 5.17rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4139"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-41674"
+
+# CVE-2022-41848 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-41849"
+
+# cpe-stable-backport: Backported in 5.4.220
+CVE_CHECK_WHITELIST += "CVE-2022-41850"
+
+# cpe-stable-backport: Backported in 5.4.190
+CVE_CHECK_WHITELIST += "CVE-2022-41858"
+
+# fixed-version: only affects 5.16rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-42328"
+
+# fixed-version: only affects 5.16rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-42329"
+
+# cpe-stable-backport: Backported in 5.4.215
+CVE_CHECK_WHITELIST += "CVE-2022-42432"
+
+# CVE-2022-4269 needs backporting (fixed from 6.3rc1)
+
+# cpe-stable-backport: Backported in 5.4.212
+CVE_CHECK_WHITELIST += "CVE-2022-42703"
+
+# cpe-stable-backport: Backported in 5.4.219
+CVE_CHECK_WHITELIST += "CVE-2022-42719"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-42720"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-42721"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-42722"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2022-42895"
+
+# cpe-stable-backport: Backported in 5.4.226
+CVE_CHECK_WHITELIST += "CVE-2022-42896"
+
+# cpe-stable-backport: Backported in 5.4.218
+CVE_CHECK_WHITELIST += "CVE-2022-43750"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4378"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4379"
+
+# cpe-stable-backport: Backported in 5.4.230
+CVE_CHECK_WHITELIST += "CVE-2022-4382"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-43945"
+
+# CVE-2022-44032 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-44033 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-44034 needs backporting (fixed from 6.4rc1)
+
+# CVE-2022-4543 has no known resolution
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-45869"
+
+# CVE-2022-45884 has no known resolution
+
+# CVE-2022-45885 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.246
+CVE_CHECK_WHITELIST += "CVE-2022-45886"
+
+# cpe-stable-backport: Backported in 5.4.246
+CVE_CHECK_WHITELIST += "CVE-2022-45887"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-45888"
+
+# cpe-stable-backport: Backported in 5.4.246
+CVE_CHECK_WHITELIST += "CVE-2022-45919"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-45934"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2022-4662"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4696"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2022-4744"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47518"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47519"
+
+# CVE-2022-47520 needs backporting (fixed from 6.1rc8)
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47521"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2022-47929"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47938"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47939"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47940"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47941"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47942"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-47943"
+
+# CVE-2022-47946 needs backporting (fixed from 5.12rc2)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-4842"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48423"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48424"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48425"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48502"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2022-48619"
+
+# cpe-stable-backport: Backported in 5.4.179
+CVE_CHECK_WHITELIST += "CVE-2022-48626"
+
+# CVE-2022-48627 needs backporting (fixed from 5.19rc7)
+
+# CVE-2022-48628 needs backporting (fixed from 6.6rc1)
+
+# cpe-stable-backport: Backported in 5.4.187
+CVE_CHECK_WHITELIST += "CVE-2022-48629"
+
+# fixed-version: only affects 5.17 onwards
+CVE_CHECK_WHITELIST += "CVE-2022-48630"
+
+# fixed-version: Fixed after version 5.0rc1
+CVE_CHECK_WHITELIST += "CVE-2023-0030"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-0045"
+
+# cpe-stable-backport: Backported in 5.4.160
+CVE_CHECK_WHITELIST += "CVE-2023-0047"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0122"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-0160"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0179"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0210"
+
+# CVE-2023-0240 needs backporting (fixed from 5.10rc1)
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-0266"
+
+# CVE-2023-0386 needs backporting (fixed from 6.2rc6)
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-0394"
+
+# cpe-stable-backport: Backported in 5.4.230
+CVE_CHECK_WHITELIST += "CVE-2023-0458"
+
+# cpe-stable-backport: Backported in 5.4.233
+CVE_CHECK_WHITELIST += "CVE-2023-0459"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-0461"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0468"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-0469"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-0590"
+
+# CVE-2023-0597 needs backporting (fixed from 6.2rc1)
+
+# cpe-stable-backport: Backported in 5.4.223
+CVE_CHECK_WHITELIST += "CVE-2023-0615"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1032"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2023-1073"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2023-1074"
+
+# CVE-2023-1075 needs backporting (fixed from 6.2rc7)
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1076"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1077"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-1078"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1079"
+
+# cpe-stable-backport: Backported in 5.4.211
+CVE_CHECK_WHITELIST += "CVE-2023-1095"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1118"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1192"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1193"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1194"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1195"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-1206"
+
+# CVE-2023-1249 needs backporting (fixed from 5.18rc1)
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1252"
+
+# CVE-2023-1281 needs backporting (fixed from 6.2)
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1295"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-1380"
+
+# cpe-stable-backport: Backported in 5.4.226
+CVE_CHECK_WHITELIST += "CVE-2023-1382"
+
+# cpe-stable-backport: Backported in 5.4.92
+CVE_CHECK_WHITELIST += "CVE-2023-1390"
+
+# CVE-2023-1476 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-1513"
+
+# CVE-2023-1582 needs backporting (fixed from 5.17rc4)
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1583"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-1611"
+
+# cpe-stable-backport: Backported in 5.4.189
+CVE_CHECK_WHITELIST += "CVE-2023-1637"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1652"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-1670"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-1829"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2023-1838"
+
+# cpe-stable-backport: Backported in 5.4.238
+CVE_CHECK_WHITELIST += "CVE-2023-1855"
+
+# cpe-stable-backport: Backported in 5.4.241
+CVE_CHECK_WHITELIST += "CVE-2023-1859"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1872"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-1989"
+
+# cpe-stable-backport: Backported in 5.4.238
+CVE_CHECK_WHITELIST += "CVE-2023-1990"
+
+# fixed-version: only affects 5.19rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-1998"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-2002"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2006"
+
+# CVE-2023-2007 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.4.202
+CVE_CHECK_WHITELIST += "CVE-2023-2008"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2019"
+
+# cpe-stable-backport: Backported in 5.4.252
+CVE_CHECK_WHITELIST += "CVE-2023-20569"
+
+# CVE-2023-20588 needs backporting (fixed from 6.5rc6)
+
+# cpe-stable-backport: Backported in 5.4.250
+CVE_CHECK_WHITELIST += "CVE-2023-20593"
+
+# CVE-2023-20928 needs backporting (fixed from 6.0rc1)
+
+# CVE-2023-20937 has no known resolution
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-20938"
+
+# CVE-2023-20941 has no known resolution
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-21102"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-21106"
+
+# cpe-stable-backport: Backported in 5.4.249
+CVE_CHECK_WHITELIST += "CVE-2023-2124"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-21255"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-21264"
+
+# CVE-2023-21400 has no known resolution
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2156"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-2162"
+
+# cpe-stable-backport: Backported in 5.4.242
+CVE_CHECK_WHITELIST += "CVE-2023-2163"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2166"
+
+# CVE-2023-2176 needs backporting (fixed from 6.3rc1)
+
+# cpe-stable-backport: Backported in 5.4.209
+CVE_CHECK_WHITELIST += "CVE-2023-2177"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-2194"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2235"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2236"
+
+# cpe-stable-backport: Backported in 5.4.242
+CVE_CHECK_WHITELIST += "CVE-2023-2248"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-2269"
+
+# CVE-2023-22995 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-22996"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-22997"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-22998"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-22999"
+
+# CVE-2023-23000 needs backporting (fixed from 5.17rc1)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23001"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23002"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23003"
+
+# CVE-2023-23004 needs backporting (fixed from 5.19rc1)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23005"
+
+# cpe-stable-backport: Backported in 5.4.170
+CVE_CHECK_WHITELIST += "CVE-2023-23006"
+
+# CVE-2023-23039 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-23454"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-23455"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2023-23559"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-23586"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2430"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-2483"
+
+# fixed-version: only affects 5.6rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-25012"
+
+# cpe-stable-backport: Backported in 5.4.242
+CVE_CHECK_WHITELIST += "CVE-2023-2513"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-25775"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2598"
+
+# CVE-2023-26242 has no known resolution
+
+# CVE-2023-2640 has no known resolution
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-26544"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-26545"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-26605"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-26606"
+
+# cpe-stable-backport: Backported in 5.4.225
+CVE_CHECK_WHITELIST += "CVE-2023-26607"
+
+# cpe-stable-backport: Backported in 5.4.227
+CVE_CHECK_WHITELIST += "CVE-2023-28327"
+
+# cpe-stable-backport: Backported in 5.4.229
+CVE_CHECK_WHITELIST += "CVE-2023-28328"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-28410"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-28464"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-28466"
+
+# cpe-stable-backport: Backported in 5.4.213
+CVE_CHECK_WHITELIST += "CVE-2023-2860"
+
+# CVE-2023-28746 needs backporting (fixed from 6.9rc1)
+
+# cpe-stable-backport: Backported in 5.4.133
+CVE_CHECK_WHITELIST += "CVE-2023-28772"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-28866"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-2898"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-2985"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-3006"
+
+# Skipping CVE-2023-3022, no affected_versions
+
+# cpe-stable-backport: Backported in 5.4.238
+CVE_CHECK_WHITELIST += "CVE-2023-30456"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-30772"
+
+# cpe-stable-backport: Backported in 5.4.244
+CVE_CHECK_WHITELIST += "CVE-2023-3090"
+
+# fixed-version: Fixed after version 4.8rc7
+CVE_CHECK_WHITELIST += "CVE-2023-3106"
+
+# Skipping CVE-2023-3108, no affected_versions
+
+# CVE-2023-31081 has no known resolution
+
+# CVE-2023-31082 has no known resolution
+
+# CVE-2023-31083 needs backporting (fixed from 6.6rc1)
+
+# CVE-2023-31084 needs backporting (fixed from 6.4rc3)
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-31085"
+
+# cpe-stable-backport: Backported in 5.4.247
+CVE_CHECK_WHITELIST += "CVE-2023-3111"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3117"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-31248"
+
+# cpe-stable-backport: Backported in 5.4.244
+CVE_CHECK_WHITELIST += "CVE-2023-3141"
+
+# cpe-stable-backport: Backported in 5.4.242
+CVE_CHECK_WHITELIST += "CVE-2023-31436"
+
+# cpe-stable-backport: Backported in 5.4.193
+CVE_CHECK_WHITELIST += "CVE-2023-3159"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-3161"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3212"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-3220"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-32233"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32247"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32248"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32250"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32252"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32254"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32257"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-32258"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-32269"
+
+# CVE-2023-32629 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-3268"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3269"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3312"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3317"
+
+# cpe-stable-backport: Backported in 5.4.240
+CVE_CHECK_WHITELIST += "CVE-2023-33203"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-33250"
+
+# CVE-2023-33288 needs backporting (fixed from 6.3rc4)
+
+# cpe-stable-backport: Backported in 5.4.248
+CVE_CHECK_WHITELIST += "CVE-2023-3338"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3355"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3357"
+
+# cpe-stable-backport: Backported in 5.4.231
+CVE_CHECK_WHITELIST += "CVE-2023-3358"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3359"
+
+# CVE-2023-3389 needs backporting (fixed from 6.0rc1)
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3390"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-33951"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-33952"
+
+# CVE-2023-3397 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.249
+CVE_CHECK_WHITELIST += "CVE-2023-34255"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-34256"
+
+# fixed-version: only affects 6.1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-34319"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-34324"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3439"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-35001"
+
+# cpe-stable-backport: Backported in 5.4.232
+CVE_CHECK_WHITELIST += "CVE-2023-3567"
+
+# CVE-2023-35693 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.246
+CVE_CHECK_WHITELIST += "CVE-2023-35788"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-35823"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-35824"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-35826"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-35827"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2023-35828"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-35829"
+
+# cpe-stable-backport: Backported in 5.4.248
+CVE_CHECK_WHITELIST += "CVE-2023-3609"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3610"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-3611"
+
+# CVE-2023-3640 has no known resolution
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-37453"
+
+# CVE-2023-37454 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.255
+CVE_CHECK_WHITELIST += "CVE-2023-3772"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3773"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3776"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3777"
+
+# cpe-stable-backport: Backported in 5.4.224
+CVE_CHECK_WHITELIST += "CVE-2023-3812"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38409"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38426"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38427"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38428"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38429"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38430"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38431"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-38432"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-3863"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3865"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3866"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-3867"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-39189"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-39191"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-39192"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-39193"
+
+# cpe-stable-backport: Backported in 5.4.255
+CVE_CHECK_WHITELIST += "CVE-2023-39194"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-39197"
+
+# CVE-2023-39198 needs backporting (fixed from 6.5rc7)
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4004"
+
+# CVE-2023-4010 has no known resolution
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4015"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-40283"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-40791"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-4128"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-4132"
+
+# CVE-2023-4133 needs backporting (fixed from 6.3)
+
+# CVE-2023-4134 needs backporting (fixed from 6.5rc1)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4147"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4155"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4194"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-4206"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-4207"
+
+# cpe-stable-backport: Backported in 5.4.253
+CVE_CHECK_WHITELIST += "CVE-2023-4208"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4244"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4273"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-42752"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-42753"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-42754"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-42755"
+
+# fixed-version: only affects 6.4rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-42756"
+
+# cpe-stable-backport: Backported in 5.4.198
+CVE_CHECK_WHITELIST += "CVE-2023-4385"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2023-4387"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4389"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4394"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-44466"
+
+# cpe-stable-backport: Backported in 5.4.196
+CVE_CHECK_WHITELIST += "CVE-2023-4459"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4563"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4569"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-45862"
+
+# cpe-stable-backport: Backported in 5.4.260
+CVE_CHECK_WHITELIST += "CVE-2023-45863"
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-45871"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-45898"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4610"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4611"
+
+# CVE-2023-4622 needs backporting (fixed from 6.5rc1)
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-4623"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-46343"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-46813"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-46838"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-46862"
+
+# CVE-2023-47233 needs backporting (fixed from 6.9rc1)
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-4732"
+
+# CVE-2023-4881 needs backporting (fixed from 6.6rc1)
+
+# cpe-stable-backport: Backported in 5.4.257
+CVE_CHECK_WHITELIST += "CVE-2023-4921"
+
+# CVE-2023-50431 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5090"
+
+# cpe-stable-backport: Backported in 5.4.255
+CVE_CHECK_WHITELIST += "CVE-2023-51042"
+
+# cpe-stable-backport: Backported in 5.4.251
+CVE_CHECK_WHITELIST += "CVE-2023-51043"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5158"
+
+# CVE-2023-51779 needs backporting (fixed from 6.7rc7)
+
+# cpe-stable-backport: Backported in 5.4.260
+CVE_CHECK_WHITELIST += "CVE-2023-5178"
+
+# cpe-stable-backport: Backported in 5.4.265
+CVE_CHECK_WHITELIST += "CVE-2023-51780"
+
+# cpe-stable-backport: Backported in 5.4.265
+CVE_CHECK_WHITELIST += "CVE-2023-51781"
+
+# cpe-stable-backport: Backported in 5.4.265
+CVE_CHECK_WHITELIST += "CVE-2023-51782"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5197"
+
+# cpe-stable-backport: Backported in 5.4.267
+CVE_CHECK_WHITELIST += "CVE-2023-52340"
+
+# CVE-2023-52429 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52433"
+
+# CVE-2023-52434 needs backporting (fixed from 6.7rc6)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52435"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52436"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52438"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52439"
+
+# fixed-version: only affects 5.17rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52440"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52441"
+
+# CVE-2023-52442 needs backporting (fixed from 6.5rc4)
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52443"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52444"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52445"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52446"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52447"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52448"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52449"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52450"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52451"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52452"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52453"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52454"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52455"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52456"
+
+# fixed-version: only affects 6.1rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52457"
+
+# CVE-2023-52458 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52459"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52460"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52461"
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52462"
+
+# fixed-version: only affects 5.8rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52463"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52464"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52465"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52467"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52468"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52469"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52470"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52471"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52472"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52473"
+
+# CVE-2023-52474 needs backporting (fixed from 6.4rc1)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52475"
+
+# CVE-2023-52476 needs backporting (fixed from 6.6rc6)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52477"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52478"
+
+# CVE-2023-52479 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52480 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52481 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52482 needs backporting (fixed from 6.6rc4)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52483"
+
+# CVE-2023-52484 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52485 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52486"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52487"
+
+# CVE-2023-52488 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52489 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52490"
+
+# CVE-2023-52491 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52492"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52493"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52494"
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52495"
+
+# CVE-2023-52497 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52498 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52499"
+
+# CVE-2023-52500 needs backporting (fixed from 6.6rc2)
+
+# CVE-2023-52501 needs backporting (fixed from 6.6rc2)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52502"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52503"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2023-52504"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52505"
+
+# CVE-2023-52506 needs backporting (fixed from 6.6rc3)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52507"
+
+# CVE-2023-52508 needs backporting (fixed from 6.6rc2)
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52509"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-52510"
+
+# CVE-2023-52511 needs backporting (fixed from 6.6rc1)
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52512"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52513"
+
+# CVE-2023-52515 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52516 needs backporting (fixed from 6.6rc1)
+
+# CVE-2023-52517 needs backporting (fixed from 6.6rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52518"
+
+# CVE-2023-52519 needs backporting (fixed from 6.6rc5)
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52520"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52522"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52523"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52524"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52525"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52526"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52527"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52528"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52529"
+
+# CVE-2023-52530 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52531 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52532 needs backporting (fixed from 6.6rc5)
+
+# CVE-2023-52559 needs backporting (fixed from 6.6rc5)
+
+# fixed-version: only affects 5.16rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52560"
+
+# CVE-2023-52561 needs backporting (fixed from 6.6rc1)
+
+# fixed-version: only affects 6.0rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52562"
+
+# CVE-2023-52563 needs backporting (fixed from 6.6rc3)
+
+# fixed-version: only affects 6.5rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52564"
+
+# CVE-2023-52565 needs backporting (fixed from 6.6rc3)
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52566"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52567"
+
+# CVE-2023-52568 needs backporting (fixed from 6.6rc4)
+
+# CVE-2023-52569 needs backporting (fixed from 6.6rc2)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52570"
+
+# CVE-2023-52571 needs backporting (fixed from 6.6rc4)
+
+# CVE-2023-52572 needs backporting (fixed from 6.6rc3)
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52573"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52574"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52575"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52576"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52577"
+
+# cpe-stable-backport: Backported in 5.4.258
+CVE_CHECK_WHITELIST += "CVE-2023-52578"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52580"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52581"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52582"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52583"
+
+# CVE-2023-52584 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52585 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52586 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52587"
+
+# CVE-2023-52588 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52589 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52590 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52591 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52593 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52594"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52595"
+
+# CVE-2023-52596 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52597"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52598"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52599"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52600"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52601"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52602"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52603"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52604"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52606"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52607"
+
+# fixed-version: only affects 5.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52608"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52609"
+
+# CVE-2023-52610 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52611"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-52612"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52613"
+
+# CVE-2023-52614 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52615"
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52616"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52617"
+
+# CVE-2023-52618 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52619"
+
+# CVE-2023-52620 needs backporting (fixed from 6.4)
+
+# CVE-2023-52621 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52622"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52623"
+
+# CVE-2023-52624 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52625 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.7rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52626"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52627"
+
+# CVE-2023-52628 needs backporting (fixed from 6.6rc1)
+
+# CVE-2023-52629 needs backporting (fixed from 6.6rc1)
+
+# fixed-version: only affects 5.10rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52630"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52631"
+
+# CVE-2023-52632 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52633 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52634 needs backporting (fixed from 6.8rc1)
+
+# CVE-2023-52635 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-52636"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2023-52637"
+
+# CVE-2023-52638 needs backporting (fixed from 6.8rc5)
+
+# CVE-2023-52639 needs backporting (fixed from 6.8rc4)
+
+# CVE-2023-52640 needs backporting (fixed from 6.8rc4)
+
+# CVE-2023-52641 needs backporting (fixed from 6.8rc4)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5345"
+
+# fixed-version: only affects 6.2 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5633"
+
+# cpe-stable-backport: Backported in 5.4.259
+CVE_CHECK_WHITELIST += "CVE-2023-5717"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-5972"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6039"
+
+# cpe-stable-backport: Backported in 5.4.267
+CVE_CHECK_WHITELIST += "CVE-2023-6040"
+
+# fixed-version: only affects 6.6rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6111"
+
+# cpe-stable-backport: Backported in 5.4.263
+CVE_CHECK_WHITELIST += "CVE-2023-6121"
+
+# fixed-version: only affects 5.7rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6176"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6200"
+
+# CVE-2023-6238 has no known resolution
+
+# CVE-2023-6240 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.273
+CVE_CHECK_WHITELIST += "CVE-2023-6270"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-6356"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6531"
+
+# CVE-2023-6535 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-6536"
+
+# CVE-2023-6546 needs backporting (fixed from 6.5rc7)
+
+# CVE-2023-6560 needs backporting (fixed from 6.7rc4)
+
+# cpe-stable-backport: Backported in 5.4.266
+CVE_CHECK_WHITELIST += "CVE-2023-6606"
+
+# CVE-2023-6610 needs backporting (fixed from 6.7rc7)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6622"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6679"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2023-6817"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2023-6915"
+
+# cpe-stable-backport: Backported in 5.4.264
+CVE_CHECK_WHITELIST += "CVE-2023-6931"
+
+# cpe-stable-backport: Backported in 5.4.263
+CVE_CHECK_WHITELIST += "CVE-2023-6932"
+
+# cpe-stable-backport: Backported in 5.4.273
+CVE_CHECK_WHITELIST += "CVE-2023-7042"
+
+# cpe-stable-backport: Backported in 5.4.235
+CVE_CHECK_WHITELIST += "CVE-2023-7192"
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0193"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-0340"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0443"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0562"
+
+# CVE-2024-0564 has no known resolution
+
+# CVE-2024-0565 needs backporting (fixed from 6.7rc6)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0582"
+
+# cpe-stable-backport: Backported in 5.4.263
+CVE_CHECK_WHITELIST += "CVE-2024-0584"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-0607"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0639"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-0641"
+
+# cpe-stable-backport: Backported in 5.4.267
+CVE_CHECK_WHITELIST += "CVE-2024-0646"
+
+# cpe-stable-backport: Backported in 5.4.243
+CVE_CHECK_WHITELIST += "CVE-2024-0775"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-0841"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-1085"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-1086"
+
+# CVE-2024-1151 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-1312 needs backporting (fixed from 6.5rc4)
+
+# CVE-2024-21803 has no known resolution
+
+# CVE-2024-2193 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.273
+CVE_CHECK_WHITELIST += "CVE-2024-22099"
+
+# CVE-2024-22386 has no known resolution
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-22705"
+
+# cpe-stable-backport: Backported in 5.4.255
+CVE_CHECK_WHITELIST += "CVE-2024-23196"
+
+# CVE-2024-23307 needs backporting (fixed from 6.9rc1)
+
+# CVE-2024-23848 has no known resolution
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-23849"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-23850"
+
+# CVE-2024-23851 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-24855 needs backporting (fixed from 6.5rc2)
+
+# CVE-2024-24857 has no known resolution
+
+# CVE-2024-24858 has no known resolution
+
+# CVE-2024-24859 has no known resolution
+
+# CVE-2024-24860 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-24861 needs backporting (fixed from 6.9rc1)
+
+# CVE-2024-24864 has no known resolution
+
+# CVE-2024-25739 has no known resolution
+
+# CVE-2024-25740 has no known resolution
+
+# CVE-2024-25741 has no known resolution
+
+# CVE-2024-25744 needs backporting (fixed from 6.7rc5)
+
+# fixed-version: only affects 6.5rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26581"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26582"
+
+# fixed-version: only affects 5.7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26583"
+
+# CVE-2024-26584 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26585 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26586 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26587"
+
+# fixed-version: only affects 6.1rc3 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26588"
+
+# CVE-2024-26589 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 5.16rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26590"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26591"
+
+# CVE-2024-26592 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26593"
+
+# CVE-2024-26594 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26595 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26596"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2024-26597"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26598"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26599"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26600"
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26601"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26602"
+
+# fixed-version: only affects 5.14rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26603"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26604"
+
+# fixed-version: only affects 6.7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26605"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26606"
+
+# CVE-2024-26607 needs backporting (fixed from 6.8rc2)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26608"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26610"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26611"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26612"
+
+# CVE-2024-26614 needs backporting (fixed from 6.8rc2)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26615"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26616"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26617"
+
+# fixed-version: only affects 6.5rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26618"
+
+# fixed-version: only affects 6.7rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26619"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26620"
+
+# fixed-version: only affects 6.7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26621"
+
+# CVE-2024-26622 needs backporting (fixed from 6.8rc7)
+
+# CVE-2024-26623 needs backporting (fixed from 6.8rc3)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26625"
+
+# fixed-version: only affects 6.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26626"
+
+# CVE-2024-26627 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26629"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26630"
+
+# fixed-version: only affects 5.13rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26631"
+
+# fixed-version: only affects 5.17rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26632"
+
+# cpe-stable-backport: Backported in 5.4.268
+CVE_CHECK_WHITELIST += "CVE-2024-26633"
+
+# fixed-version: only affects 6.6rc7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26634"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26635"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26636"
+
+# fixed-version: only affects 6.7 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26637"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26638"
+
+# fixed-version: only affects 6.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26639"
+
+# CVE-2024-26640 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26641 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26642 needs backporting (fixed from 6.8)
+
+# fixed-version: only affects 6.5rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26643"
+
+# CVE-2024-26644 needs backporting (fixed from 6.8rc2)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26645"
+
+# CVE-2024-26646 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26647 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26648 needs backporting (fixed from 6.8rc1)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26649"
+
+# CVE-2024-26650 needs backporting (fixed from 6.8rc2)
+
+# cpe-stable-backport: Backported in 5.4.273
+CVE_CHECK_WHITELIST += "CVE-2024-26651"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26652"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26653"
+
+# CVE-2024-26654 needs backporting (fixed from 6.9rc2)
+
+# CVE-2024-26655 needs backporting (fixed from 6.9rc2)
+
+# CVE-2024-26656 needs backporting (fixed from 6.9rc1)
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26657"
+
+# CVE-2024-26658 needs backporting (fixed from 6.8rc1)
+
+# CVE-2024-26659 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 5.11rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26660"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26661"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26662"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26663"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26664"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26665"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26666"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26667"
+
+# CVE-2024-26668 needs backporting (fixed from 6.8rc2)
+
+# CVE-2024-26669 needs backporting (fixed from 6.8rc2)
+
+# fixed-version: only affects 6.6rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26670"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26671"
+
+# CVE-2024-26672 needs backporting (fixed from 6.8rc1)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26673"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26674"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26675"
+
+# CVE-2024-26676 needs backporting (fixed from 6.8rc4)
+
+# CVE-2024-26677 needs backporting (fixed from 6.8rc4)
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26678"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26679"
+
+# fixed-version: only affects 5.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26680"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26681"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26682"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26683"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26684"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26685"
+
+# CVE-2024-26686 needs backporting (fixed from 6.8rc4)
+
+# CVE-2024-26687 needs backporting (fixed from 6.8rc5)
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26688"
+
+# CVE-2024-26689 needs backporting (fixed from 6.8rc4)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26690"
+
+# CVE-2024-26691 needs backporting (fixed from 6.8rc5)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26692"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26693"
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26694"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26695"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26696"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26697"
+
+# fixed-version: only affects 5.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26698"
+
+# CVE-2024-26699 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26700 needs backporting (fixed from 6.8rc4)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26702"
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26703"
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26704"
+
+# fixed-version: only affects 6.6rc2 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26705"
+
+# CVE-2024-26706 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26707"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26708"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26709"
+
+# fixed-version: only affects 6.8rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26710"
+
+# fixed-version: only affects 6.2rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26711"
+
+# CVE-2024-26712 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26713 needs backporting (fixed from 6.8rc5)
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26714"
+
+# CVE-2024-26715 needs backporting (fixed from 6.8rc3)
+
+# fixed-version: only affects 6.5rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26716"
+
+# fixed-version: only affects 5.12rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26717"
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26718"
+
+# CVE-2024-26719 needs backporting (fixed from 6.8rc3)
+
+# cpe-stable-backport: Backported in 5.4.269
+CVE_CHECK_WHITELIST += "CVE-2024-26720"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26721"
+
+# fixed-version: only affects 6.7rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26722"
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26723"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26724"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26725"
+
+# CVE-2024-26726 needs backporting (fixed from 6.8rc5)
+
+# fixed-version: only affects 5.9rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26727"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26728"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26729"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26730"
+
+# fixed-version: only affects 6.4rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26731"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26732"
+
+# CVE-2024-26733 needs backporting (fixed from 6.8rc6)
+
+# fixed-version: only affects 6.3rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26734"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26735"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26736"
+
+# fixed-version: only affects 5.15rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26737"
+
+# CVE-2024-26738 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26739 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26740 needs backporting (fixed from 6.8rc6)
+
+# fixed-version: only affects 6.1rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26741"
+
+# fixed-version: only affects 6.0rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26742"
+
+# CVE-2024-26743 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26744 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26745 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26746"
+
+# CVE-2024-26747 needs backporting (fixed from 6.8rc6)
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26748"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26749"
+
+# fixed-version: only affects 6.8rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26750"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26751"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26752"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26753"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26754"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26755"
+
+# CVE-2024-26756 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26757 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26758 needs backporting (fixed from 6.8rc6)
+
+# CVE-2024-26759 needs backporting (fixed from 6.8rc6)
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26760"
+
+# fixed-version: only affects 5.19rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26761"
+
+# fixed-version: only affects 6.7rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26762"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26763"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26764"
+
+# CVE-2024-26765 needs backporting (fixed from 6.8rc6)
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26766"
+
+# CVE-2024-26767 needs backporting (fixed from 6.8rc5)
+
+# CVE-2024-26768 needs backporting (fixed from 6.8rc4)
+
+# CVE-2024-26769 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26770 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26771 needs backporting (fixed from 6.8rc3)
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26772"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26773"
+
+# CVE-2024-26774 needs backporting (fixed from 6.8rc3)
+
+# CVE-2024-26775 needs backporting (fixed from 6.8rc2)
+
+# CVE-2024-26776 needs backporting (fixed from 6.8rc2)
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26777"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26778"
+
+# cpe-stable-backport: Backported in 5.4.270
+CVE_CHECK_WHITELIST += "CVE-2024-26779"
+
+# fixed-version: only affects 6.8rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26780"
+
+# fixed-version: only affects 6.8rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26781"
+
+# fixed-version: only affects 5.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26782"
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26783"
+
+# CVE-2024-26784 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26785"
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26786"
+
+# CVE-2024-26787 needs backporting (fixed from 6.8rc7)
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26788"
+
+# CVE-2024-26789 needs backporting (fixed from 6.8rc7)
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26790"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26791"
+
+# fixed-version: only affects 6.8rc4 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26792"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26793"
+
+# fixed-version: only affects 6.8rc6 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26794"
+
+# CVE-2024-26795 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 6.6rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26796"
+
+# CVE-2024-26797 needs backporting (fixed from 6.8rc7)
+
+# CVE-2024-26798 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 5.18rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26799"
+
+# fixed-version: only affects 6.8rc5 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26800"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26801"
+
+# CVE-2024-26802 needs backporting (fixed from 6.8rc7)
+
+# CVE-2024-26803 needs backporting (fixed from 6.8rc7)
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26804"
+
+# cpe-stable-backport: Backported in 5.4.271
+CVE_CHECK_WHITELIST += "CVE-2024-26805"
+
+# CVE-2024-26806 needs backporting (fixed from 6.8rc7)
+
+# fixed-version: only affects 6.4rc1 onwards
+CVE_CHECK_WHITELIST += "CVE-2024-26807"
+
+# CVE-2024-26808 needs backporting (fixed from 6.8rc2)
+
+# CVE-2024-26809 needs backporting (fixed from 6.9rc1)
+
diff --git a/meta/recipes-kernel/linux/generate-cve-exclusions.py b/meta/recipes-kernel/linux/generate-cve-exclusions.py
new file mode 100755
index 0000000000..12ae3b0b1d
--- /dev/null
+++ b/meta/recipes-kernel/linux/generate-cve-exclusions.py
@@ -0,0 +1,101 @@
+#! /usr/bin/env python3
+
+# Generate granular CVE status metadata for a specific version of the kernel
+# using data from linuxkernelcves.com.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+
+import argparse
+import datetime
+import json
+import pathlib
+import re
+
+from packaging.version import Version
+
+
+def parse_version(s):
+ """
+ Parse the version string and either return a packaging.version.Version, or
+ None if the string was unset or "unk".
+ """
+ if s and s != "unk":
+ # packaging.version.Version doesn't approve of versions like v5.12-rc1-dontuse
+ s = s.replace("-dontuse", "")
+ return Version(s)
+ return None
+
+
+def main(argp=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("datadir", type=pathlib.Path, help="Path to a clone of https://github.com/nluedtke/linux_kernel_cves")
+ parser.add_argument("version", type=Version, help="Kernel version number to generate data for, such as 6.1.38")
+
+ args = parser.parse_args(argp)
+ datadir = args.datadir
+ version = args.version
+ base_version = f"{version.major}.{version.minor}"
+
+ with open(datadir / "data" / "kernel_cves.json", "r") as f:
+ cve_data = json.load(f)
+
+ with open(datadir / "data" / "stream_fixes.json", "r") as f:
+ stream_data = json.load(f)
+
+ print(f"""
+# Auto-generated CVE metadata, DO NOT EDIT BY HAND.
+# Generated at {datetime.datetime.now()} for version {version}
+
+python check_kernel_cve_status_version() {{
+ this_version = "{version}"
+ kernel_version = d.getVar("LINUX_VERSION")
+ if kernel_version != this_version:
+ bb.warn("Kernel CVE status needs updating: generated for %s but kernel is %s" % (this_version, kernel_version))
+}}
+do_cve_check[prefuncs] += "check_kernel_cve_status_version"
+""")
+
+ for cve, data in cve_data.items():
+ if "affected_versions" not in data:
+ print(f"# Skipping {cve}, no affected_versions")
+ print()
+ continue
+
+ affected = data["affected_versions"]
+ first_affected, last_affected = re.search(r"(.+) to (.+)", affected).groups()
+ first_affected = parse_version(first_affected)
+ last_affected = parse_version(last_affected)
+
+ handled = False
+ if not last_affected:
+ print(f"# {cve} has no known resolution")
+ elif first_affected and version < first_affected:
+ print(f"# fixed-version: only affects {first_affected} onwards")
+ handled = True
+ elif last_affected < version:
+ print(f"# fixed-version: Fixed after version {last_affected}")
+ handled = True
+ else:
+ if cve in stream_data:
+ backport_data = stream_data[cve]
+ if base_version in backport_data:
+ backport_ver = Version(backport_data[base_version]["fixed_version"])
+ if backport_ver <= version:
+ print(f"# cpe-stable-backport: Backported in {backport_ver}")
+ handled = True
+ else:
+ # TODO print a note that the kernel needs bumping
+ print(f"# {cve} needs backporting (fixed from {backport_ver})")
+ else:
+ print(f"# {cve} needs backporting (fixed from {last_affected})")
+ else:
+ print(f"# {cve} needs backporting (fixed from {last_affected})")
+
+ if handled:
+ print(f'CVE_CHECK_WHITELIST += "{cve}"')
+
+ print()
+
+
+if __name__ == "__main__":
+ main()
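
For reference, a minimal sketch of how the generator above could be run to regenerate the 5.4 exclusion include consumed by the recipes below; the clone location is illustrative, the script writes to stdout, and the Python "packaging" module is assumed to be installed on the host:

    $ git clone https://github.com/nluedtke/linux_kernel_cves ~/linux_kernel_cves
    $ ./generate-cve-exclusions.py ~/linux_kernel_cves 5.4.273 > cve-exclusion_5.4.inc
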
diff --git a/meta/recipes-kernel/linux/linux-yocto-dev.bb b/meta/recipes-kernel/linux/linux-yocto-dev.bb
index 06a9108fab..a1c0de9981 100644
--- a/meta/recipes-kernel/linux/linux-yocto-dev.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-dev.bb
@@ -10,8 +10,6 @@
inherit kernel
require recipes-kernel/linux/linux-yocto.inc
-# for ncurses tests
-inherit pkgconfig
# provide this .inc to set specific revisions
include recipes-kernel/linux/linux-yocto-dev-revisions.inc
diff --git a/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb b/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb
index bf5359d120..f912304858 100644
--- a/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb
@@ -11,13 +11,13 @@ python () {
raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it")
}
-SRCREV_machine ?= "24d323fa0e17bcd62c9cfe1fd4153c304a06f38c"
-SRCREV_meta ?= "3fecb08507e286d1458497faaf31d1a07cc7d373"
+SRCREV_machine ?= "c93e75bc334ba00df2d66411a0d79c4378cf4af8"
+SRCREV_meta ?= "ecd382f3477fae022ad1881e4c39e810cdc3c760"
SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}"
-LINUX_VERSION ?= "5.4.192"
+LINUX_VERSION ?= "5.4.273"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
diff --git a/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb b/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb
index dee636aca5..2f94782471 100644
--- a/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb
+++ b/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb
@@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig"
require recipes-kernel/linux/linux-yocto.inc
-LINUX_VERSION ?= "5.4.192"
+LINUX_VERSION ?= "5.4.273"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
@@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native"
KMETA = "kernel-meta"
KCONF_BSP_AUDIT_LEVEL = "2"
-SRCREV_machine_qemuarm ?= "460de085c07ab1a221317e6804c13657456c5368"
-SRCREV_machine ?= "b414a2fc5ce5f68c33d297d9cde4fef5437b773b"
-SRCREV_meta ?= "3fecb08507e286d1458497faaf31d1a07cc7d373"
+SRCREV_machine_qemuarm ?= "d29f3f3a932319053ad24d84b087b0a57908c1bc"
+SRCREV_machine ?= "b6480d09d84d09e7560daa5c1d73917292ae30c0"
+SRCREV_meta ?= "ecd382f3477fae022ad1881e4c39e810cdc3c760"
PV = "${LINUX_VERSION}+git${SRCPV}"
diff --git a/meta/recipes-kernel/linux/linux-yocto.inc b/meta/recipes-kernel/linux/linux-yocto.inc
index 0a4d528aab..2978c2fb90 100644
--- a/meta/recipes-kernel/linux/linux-yocto.inc
+++ b/meta/recipes-kernel/linux/linux-yocto.inc
@@ -56,3 +56,6 @@ do_install_append(){
# enable kernel-sample for oeqa/runtime/cases's ksample.py test
KERNEL_FEATURES_append_qemuall=" features/kernel-sample/kernel-sample.scc"
+
+# CVE exclusion
+include recipes-kernel/linux/cve-exclusion.inc
diff --git a/meta/recipes-kernel/linux/linux-yocto_5.4.bb b/meta/recipes-kernel/linux/linux-yocto_5.4.bb
index 680f40d208..108043bd98 100644
--- a/meta/recipes-kernel/linux/linux-yocto_5.4.bb
+++ b/meta/recipes-kernel/linux/linux-yocto_5.4.bb
@@ -1,6 +1,7 @@
KBRANCH ?= "v5.4/standard/base"
require recipes-kernel/linux/linux-yocto.inc
+include recipes-kernel/linux/cve-exclusion_5.4.inc
# board specific branches
KBRANCH_qemuarm ?= "v5.4/standard/arm-versatile-926ejs"
@@ -12,16 +13,16 @@ KBRANCH_qemux86 ?= "v5.4/standard/base"
KBRANCH_qemux86-64 ?= "v5.4/standard/base"
KBRANCH_qemumips64 ?= "v5.4/standard/mti-malta64"
-SRCREV_machine_qemuarm ?= "68a2ce69aaf2e8d96eef4aaccd70fc0ef7368a46"
-SRCREV_machine_qemuarm64 ?= "acfed0930d37a714d705645ff7cfbfbd0ad040e7"
-SRCREV_machine_qemumips ?= "e7046a2c8972e925cd2e6ac7f392abe87cbec5f5"
-SRCREV_machine_qemuppc ?= "997e06e0af674c27627eaa76a60b2f63cb16f38d"
-SRCREV_machine_qemuriscv64 ?= "85f0668fea1442bbcc2c8b1509d9f711b4b73649"
-SRCREV_machine_qemux86 ?= "85f0668fea1442bbcc2c8b1509d9f711b4b73649"
-SRCREV_machine_qemux86-64 ?= "85f0668fea1442bbcc2c8b1509d9f711b4b73649"
-SRCREV_machine_qemumips64 ?= "7b526cde12d78604b6f1e1ad62da31dcb729f35f"
-SRCREV_machine ?= "85f0668fea1442bbcc2c8b1509d9f711b4b73649"
-SRCREV_meta ?= "3fecb08507e286d1458497faaf31d1a07cc7d373"
+SRCREV_machine_qemuarm ?= "b7e0891bf4b281c4e29b86f708e10a3339670acc"
+SRCREV_machine_qemuarm64 ?= "ff75f0c7beb167391f0285dd2993394cd143a8a7"
+SRCREV_machine_qemumips ?= "650e43a19e625d1db9d8245cda27db7b86990398"
+SRCREV_machine_qemuppc ?= "0fb6546a09f90befecb11cd0f10274276e8a3021"
+SRCREV_machine_qemuriscv64 ?= "fe901e2f4b156e9cf7ddb03f479f7339d28e398b"
+SRCREV_machine_qemux86 ?= "fe901e2f4b156e9cf7ddb03f479f7339d28e398b"
+SRCREV_machine_qemux86-64 ?= "fe901e2f4b156e9cf7ddb03f479f7339d28e398b"
+SRCREV_machine_qemumips64 ?= "f59947f338319b1741db5dfac34f08399561ab25"
+SRCREV_machine ?= "fe901e2f4b156e9cf7ddb03f479f7339d28e398b"
+SRCREV_meta ?= "ecd382f3477fae022ad1881e4c39e810cdc3c760"
# remap qemuarm to qemuarma15 for the 5.4 kernel
# KMACHINE_qemuarm ?= "qemuarma15"
@@ -30,7 +31,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRA
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}"
LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814"
-LINUX_VERSION ?= "5.4.192"
+LINUX_VERSION ?= "5.4.273"
DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}"
DEPENDS += "openssl-native util-linux-native"
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0001-fix-strncpy-equals-destination-size-warning.patch b/meta/recipes-kernel/lttng/lttng-modules/0001-fix-strncpy-equals-destination-size-warning.patch
deleted file mode 100644
index 6f82488772..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0001-fix-strncpy-equals-destination-size-warning.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From cb78974394a9af865e1d2d606e838dbec0de80e8 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 5 Oct 2020 15:31:42 -0400
-Subject: [PATCH 01/16] fix: strncpy equals destination size warning
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Some versions of GCC when called with -Wstringop-truncation will warn
-when doing a copy of the same size as the destination buffer with
-strncpy :
-
- ‘strncpy’ specified bound 256 equals destination size [-Werror=stringop-truncation]
-
-Since we unconditionally write '\0' in the last byte, reduce the copy
-size by one.
-
-Upstream-Status: Backport
-
-Change-Id: Idb907c9550817a06fc0dffc489740f63d440e7d4
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
----
- lttng-syscalls.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/lttng-syscalls.c b/lttng-syscalls.c
-index 49c0d81b..b43dd570 100644
---- a/lttng-syscalls.c
-+++ b/lttng-syscalls.c
-@@ -719,7 +719,7 @@ int fill_table(const struct trace_syscall_entry *table, size_t table_len,
- ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_COMPAT;
- break;
- }
-- strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-+ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN - 1);
- ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
- ev.instrumentation = LTTNG_KERNEL_SYSCALL;
- chan_table[i] = _lttng_event_create(chan, &ev, filter,
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0002-fix-objtool-Rename-frame.h-objtool.h-v5.10.patch b/meta/recipes-kernel/lttng/lttng-modules/0002-fix-objtool-Rename-frame.h-objtool.h-v5.10.patch
deleted file mode 100644
index 90d7b0cf9c..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0002-fix-objtool-Rename-frame.h-objtool.h-v5.10.patch
+++ /dev/null
@@ -1,88 +0,0 @@
-From 8e4e8641961df32bfe519fd18d899250951acd1a Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 26 Oct 2020 13:41:02 -0400
-Subject: [PATCH 02/16] fix: objtool: Rename frame.h -> objtool.h (v5.10)
-
-See upstream commit :
-
- commit 00089c048eb4a8250325efb32a2724fd0da68cce
- Author: Julien Thierry <jthierry@redhat.com>
- Date: Fri Sep 4 16:30:25 2020 +0100
-
- objtool: Rename frame.h -> objtool.h
-
- Header frame.h is getting more code annotations to help objtool analyze
- object files.
-
- Rename the file to objtool.h.
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: Ic2283161bebcbf1e33b72805eb4d2628f4ae3e89
----
- lttng-filter-interpreter.c | 2 +-
- wrapper/{frame.h => objtool.h} | 19 ++++++++++++-------
- 2 files changed, 13 insertions(+), 8 deletions(-)
- rename wrapper/{frame.h => objtool.h} (50%)
-
-diff --git a/lttng-filter-interpreter.c b/lttng-filter-interpreter.c
-index 21169f01..5d572437 100644
---- a/lttng-filter-interpreter.c
-+++ b/lttng-filter-interpreter.c
-@@ -8,7 +8,7 @@
- */
-
- #include <wrapper/uaccess.h>
--#include <wrapper/frame.h>
-+#include <wrapper/objtool.h>
- #include <wrapper/types.h>
- #include <linux/swab.h>
-
-diff --git a/wrapper/frame.h b/wrapper/objtool.h
-similarity index 50%
-rename from wrapper/frame.h
-rename to wrapper/objtool.h
-index 6e6dc811..3b997cae 100644
---- a/wrapper/frame.h
-+++ b/wrapper/objtool.h
-@@ -1,18 +1,23 @@
--/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
-+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
-- * wrapper/frame.h
-+ * wrapper/objtool.h
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
--#ifndef _LTTNG_WRAPPER_FRAME_H
--#define _LTTNG_WRAPPER_FRAME_H
-+#ifndef _LTTNG_WRAPPER_OBJTOOL_H
-+#define _LTTNG_WRAPPER_OBJTOOL_H
-
- #include <linux/version.h>
-
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
--
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+#include <linux/objtool.h>
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
- #include <linux/frame.h>
-+#endif
-+
-+
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
-
- #define LTTNG_STACK_FRAME_NON_STANDARD(func) \
- STACK_FRAME_NON_STANDARD(func)
-@@ -23,4 +28,4 @@
-
- #endif
-
--#endif /* _LTTNG_WRAPPER_FRAME_H */
-+#endif /* _LTTNG_WRAPPER_OBJTOOL_H */
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0003-fix-btrfs-tracepoints-output-proper-root-owner-for-t.patch b/meta/recipes-kernel/lttng/lttng-modules/0003-fix-btrfs-tracepoints-output-proper-root-owner-for-t.patch
deleted file mode 100644
index 2a100361ea..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0003-fix-btrfs-tracepoints-output-proper-root-owner-for-t.patch
+++ /dev/null
@@ -1,316 +0,0 @@
-From 5a3b76a81fd3df52405700d369223d64c7a04dc8 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Tue, 27 Oct 2020 11:42:23 -0400
-Subject: [PATCH 03/16] fix: btrfs: tracepoints: output proper root owner for
- trace_find_free_extent() (v5.10)
-
-See upstream commit :
-
- commit 437490fed3b0c9ae21af8f70e0f338d34560842b
- Author: Qu Wenruo <wqu@suse.com>
- Date: Tue Jul 28 09:42:49 2020 +0800
-
- btrfs: tracepoints: output proper root owner for trace_find_free_extent()
-
- The current trace event always output result like this:
-
- find_free_extent: root=2(EXTENT_TREE) len=16384 empty_size=0 flags=4(METADATA)
- find_free_extent: root=2(EXTENT_TREE) len=16384 empty_size=0 flags=4(METADATA)
- find_free_extent: root=2(EXTENT_TREE) len=8192 empty_size=0 flags=1(DATA)
- find_free_extent: root=2(EXTENT_TREE) len=8192 empty_size=0 flags=1(DATA)
- find_free_extent: root=2(EXTENT_TREE) len=4096 empty_size=0 flags=1(DATA)
- find_free_extent: root=2(EXTENT_TREE) len=4096 empty_size=0 flags=1(DATA)
-
- T's saying we're allocating data extent for EXTENT tree, which is not
- even possible.
-
- It's because we always use EXTENT tree as the owner for
- trace_find_free_extent() without using the @root from
- btrfs_reserve_extent().
-
- This patch will change the parameter to use proper @root for
- trace_find_free_extent():
-
- Now it looks much better:
-
- find_free_extent: root=5(FS_TREE) len=16384 empty_size=0 flags=36(METADATA|DUP)
- find_free_extent: root=5(FS_TREE) len=8192 empty_size=0 flags=1(DATA)
- find_free_extent: root=5(FS_TREE) len=16384 empty_size=0 flags=1(DATA)
- find_free_extent: root=5(FS_TREE) len=4096 empty_size=0 flags=1(DATA)
- find_free_extent: root=5(FS_TREE) len=8192 empty_size=0 flags=1(DATA)
- find_free_extent: root=5(FS_TREE) len=16384 empty_size=0 flags=36(METADATA|DUP)
- find_free_extent: root=7(CSUM_TREE) len=16384 empty_size=0 flags=36(METADATA|DUP)
- find_free_extent: root=2(EXTENT_TREE) len=16384 empty_size=0 flags=36(METADATA|DUP)
- find_free_extent: root=1(ROOT_TREE) len=16384 empty_size=0 flags=36(METADATA|DUP)
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: I1d674064d29b31417e2acffdeb735f5052a87032
----
- instrumentation/events/lttng-module/btrfs.h | 206 ++++++++++++--------
- 1 file changed, 122 insertions(+), 84 deletions(-)
-
-diff --git a/instrumentation/events/lttng-module/btrfs.h b/instrumentation/events/lttng-module/btrfs.h
-index 7b290085..52fcfd0d 100644
---- a/instrumentation/events/lttng-module/btrfs.h
-+++ b/instrumentation/events/lttng-module/btrfs.h
-@@ -1856,7 +1856,29 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__reserved_extent, btrfs_reserved_extent_f
-
- #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0))
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0) || \
-+ LTTNG_KERNEL_RANGE(5,9,6, 5,10,0) || \
-+ LTTNG_KERNEL_RANGE(5,4,78, 5,5,0))
-+LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
-+
-+ btrfs_find_free_extent,
-+
-+ TP_PROTO(const struct btrfs_root *root, u64 num_bytes, u64 empty_size,
-+ u64 data),
-+
-+ TP_ARGS(root, num_bytes, empty_size, data),
-+
-+ TP_FIELDS(
-+ ctf_array(u8, fsid, root->lttng_fs_info_fsid, BTRFS_UUID_SIZE)
-+ ctf_integer(u64, root_objectid, root->root_key.objectid)
-+ ctf_integer(u64, num_bytes, num_bytes)
-+ ctf_integer(u64, empty_size, empty_size)
-+ ctf_integer(u64, data, data)
-+ )
-+)
-+
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0))
-+
- LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
-
- btrfs_find_free_extent,
-@@ -1874,6 +1896,105 @@ LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
- )
- )
-
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0))
-+
-+LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
-+
-+ btrfs_find_free_extent,
-+
-+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
-+ u64 data),
-+
-+ TP_ARGS(fs_info, num_bytes, empty_size, data),
-+
-+ TP_FIELDS(
-+ ctf_array(u8, fsid, lttng_fs_info_fsid, BTRFS_UUID_SIZE)
-+ ctf_integer(u64, num_bytes, num_bytes)
-+ ctf_integer(u64, empty_size, empty_size)
-+ ctf_integer(u64, data, data)
-+ )
-+)
-+
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
-+
-+LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
-+
-+ btrfs_find_free_extent,
-+
-+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
-+ u64 data),
-+
-+ TP_ARGS(fs_info, num_bytes, empty_size, data),
-+
-+ TP_FIELDS(
-+ ctf_array(u8, fsid, lttng_fs_info_fsid, BTRFS_UUID_SIZE)
-+ ctf_integer(u64, num_bytes, num_bytes)
-+ ctf_integer(u64, empty_size, empty_size)
-+ ctf_integer(u64, data, data)
-+ )
-+)
-+
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-+
-+LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
-+
-+ btrfs_find_free_extent,
-+
-+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
-+ u64 data),
-+
-+ TP_ARGS(fs_info, num_bytes, empty_size, data),
-+
-+ TP_FIELDS(
-+ ctf_array(u8, fsid, lttng_fs_info_fsid, BTRFS_UUID_SIZE)
-+ ctf_integer(u64, num_bytes, num_bytes)
-+ ctf_integer(u64, empty_size, empty_size)
-+ ctf_integer(u64, data, data)
-+ )
-+)
-+
-+#elif (LTTNG_SLE_KERNEL_RANGE(4,4,73,5,0,0, 4,4,73,6,0,0) || \
-+ LTTNG_SLE_KERNEL_RANGE(4,4,82,6,0,0, 4,4,82,7,0,0) || \
-+ LTTNG_SLE_KERNEL_RANGE(4,4,92,6,0,0, 4,4,92,7,0,0) || \
-+ LTTNG_SLE_KERNEL_RANGE(4,4,103,6,0,0, 4,5,0,0,0,0))
-+
-+LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
-+
-+ btrfs_find_free_extent,
-+
-+ TP_PROTO(const struct btrfs_root *root, u64 num_bytes, u64 empty_size,
-+ u64 data),
-+
-+ TP_ARGS(root, num_bytes, empty_size, data),
-+
-+ TP_FIELDS(
-+ ctf_integer(u64, root_objectid, root->root_key.objectid)
-+ ctf_integer(u64, num_bytes, num_bytes)
-+ ctf_integer(u64, empty_size, empty_size)
-+ ctf_integer(u64, data, data)
-+ )
-+)
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-+
-+LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
-+
-+ btrfs_find_free_extent,
-+
-+ TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
-+ u64 data),
-+
-+ TP_ARGS(root, num_bytes, empty_size, data),
-+
-+ TP_FIELDS(
-+ ctf_integer(u64, root_objectid, root->root_key.objectid)
-+ ctf_integer(u64, num_bytes, num_bytes)
-+ ctf_integer(u64, empty_size, empty_size)
-+ ctf_integer(u64, data, data)
-+ )
-+)
-+#endif
-+
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0))
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__reserve_extent,
-
- TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
-@@ -1907,22 +2028,6 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__reserve_extent, btrfs_reserve_extent_clus
- )
-
- #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0))
--LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
--
-- btrfs_find_free_extent,
--
-- TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
-- u64 data),
--
-- TP_ARGS(fs_info, num_bytes, empty_size, data),
--
-- TP_FIELDS(
-- ctf_array(u8, fsid, lttng_fs_info_fsid, BTRFS_UUID_SIZE)
-- ctf_integer(u64, num_bytes, num_bytes)
-- ctf_integer(u64, empty_size, empty_size)
-- ctf_integer(u64, data, data)
-- )
--)
-
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__reserve_extent,
-
-@@ -1957,22 +2062,6 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__reserve_extent, btrfs_reserve_extent_clus
- )
-
- #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
--LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
--
-- btrfs_find_free_extent,
--
-- TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
-- u64 data),
--
-- TP_ARGS(fs_info, num_bytes, empty_size, data),
--
-- TP_FIELDS(
-- ctf_array(u8, fsid, lttng_fs_info_fsid, BTRFS_UUID_SIZE)
-- ctf_integer(u64, num_bytes, num_bytes)
-- ctf_integer(u64, empty_size, empty_size)
-- ctf_integer(u64, data, data)
-- )
--)
-
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__reserve_extent,
-
-@@ -2011,23 +2100,6 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__reserve_extent, btrfs_reserve_extent_clus
-
- #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
--LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
--
-- btrfs_find_free_extent,
--
-- TP_PROTO(struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
-- u64 data),
--
-- TP_ARGS(fs_info, num_bytes, empty_size, data),
--
-- TP_FIELDS(
-- ctf_array(u8, fsid, lttng_fs_info_fsid, BTRFS_UUID_SIZE)
-- ctf_integer(u64, num_bytes, num_bytes)
-- ctf_integer(u64, empty_size, empty_size)
-- ctf_integer(u64, data, data)
-- )
--)
--
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__reserve_extent,
-
- TP_PROTO(struct btrfs_fs_info *fs_info,
-@@ -2066,23 +2138,6 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__reserve_extent, btrfs_reserve_extent_clus
- LTTNG_SLE_KERNEL_RANGE(4,4,92,6,0,0, 4,4,92,7,0,0) || \
- LTTNG_SLE_KERNEL_RANGE(4,4,103,6,0,0, 4,5,0,0,0,0))
-
--LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
--
-- btrfs_find_free_extent,
--
-- TP_PROTO(const struct btrfs_root *root, u64 num_bytes, u64 empty_size,
-- u64 data),
--
-- TP_ARGS(root, num_bytes, empty_size, data),
--
-- TP_FIELDS(
-- ctf_integer(u64, root_objectid, root->root_key.objectid)
-- ctf_integer(u64, num_bytes, num_bytes)
-- ctf_integer(u64, empty_size, empty_size)
-- ctf_integer(u64, data, data)
-- )
--)
--
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__reserve_extent,
-
- TP_PROTO(const struct btrfs_root *root,
-@@ -2120,23 +2175,6 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__reserve_extent, btrfs_reserve_extent_clus
-
- #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-
--LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
--
-- btrfs_find_free_extent,
--
-- TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
-- u64 data),
--
-- TP_ARGS(root, num_bytes, empty_size, data),
--
-- TP_FIELDS(
-- ctf_integer(u64, root_objectid, root->root_key.objectid)
-- ctf_integer(u64, num_bytes, num_bytes)
-- ctf_integer(u64, empty_size, empty_size)
-- ctf_integer(u64, data, data)
-- )
--)
--
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__reserve_extent,
-
- TP_PROTO(struct btrfs_root *root,
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0004-fix-btrfs-make-ordered-extent-tracepoint-take-btrfs_.patch b/meta/recipes-kernel/lttng/lttng-modules/0004-fix-btrfs-make-ordered-extent-tracepoint-take-btrfs_.patch
deleted file mode 100644
index 67025418c3..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0004-fix-btrfs-make-ordered-extent-tracepoint-take-btrfs_.patch
+++ /dev/null
@@ -1,179 +0,0 @@
-From d51a3332909ff034c8ec16ead0090bd6a4e2bc38 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Tue, 27 Oct 2020 12:10:05 -0400
-Subject: [PATCH 04/16] fix: btrfs: make ordered extent tracepoint take
- btrfs_inode (v5.10)
-
-See upstream commit :
-
- commit acbf1dd0fcbd10c67826a19958f55a053b32f532
- Author: Nikolay Borisov <nborisov@suse.com>
- Date: Mon Aug 31 14:42:40 2020 +0300
-
- btrfs: make ordered extent tracepoint take btrfs_inode
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: I096d0801ffe0ad826cfe414cdd1c0857cbd2b624
----
- instrumentation/events/lttng-module/btrfs.h | 120 +++++++++++++++-----
- 1 file changed, 90 insertions(+), 30 deletions(-)
-
-diff --git a/instrumentation/events/lttng-module/btrfs.h b/instrumentation/events/lttng-module/btrfs.h
-index 52fcfd0d..d47f3280 100644
---- a/instrumentation/events/lttng-module/btrfs.h
-+++ b/instrumentation/events/lttng-module/btrfs.h
-@@ -346,7 +346,29 @@ LTTNG_TRACEPOINT_EVENT(btrfs_handle_em_exist,
- )
- #endif
-
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__ordered_extent,
-+
-+ TP_PROTO(const struct btrfs_inode *inode,
-+ const struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered),
-+
-+ TP_FIELDS(
-+ ctf_array(u8, fsid, inode->root->lttng_fs_info_fsid, BTRFS_UUID_SIZE)
-+ ctf_integer(ino_t, ino, btrfs_ino(inode))
-+ ctf_integer(u64, file_offset, ordered->file_offset)
-+ ctf_integer(u64, start, ordered->disk_bytenr)
-+ ctf_integer(u64, len, ordered->num_bytes)
-+ ctf_integer(u64, disk_len, ordered->disk_num_bytes)
-+ ctf_integer(u64, bytes_left, ordered->bytes_left)
-+ ctf_integer(unsigned long, flags, ordered->flags)
-+ ctf_integer(int, compress_type, ordered->compress_type)
-+ ctf_integer(int, refs, refcount_read(&ordered->refs))
-+ ctf_integer(u64, root_objectid, inode->root->root_key.objectid)
-+ )
-+)
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__ordered_extent,
-
- TP_PROTO(const struct inode *inode,
-@@ -458,7 +480,39 @@ LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__ordered_extent,
- )
- #endif
-
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) || \
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_add,
-+
-+ TP_PROTO(const struct btrfs_inode *inode,
-+ const struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered)
-+)
-+
-+LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_remove,
-+
-+ TP_PROTO(const struct btrfs_inode *inode,
-+ const struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered)
-+)
-+
-+LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_start,
-+
-+ TP_PROTO(const struct btrfs_inode *inode,
-+ const struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered)
-+)
-+
-+LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_put,
-+
-+ TP_PROTO(const struct btrfs_inode *inode,
-+ const struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered)
-+)
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) || \
- LTTNG_SLE_KERNEL_RANGE(4,4,73,5,0,0, 4,4,73,6,0,0) || \
- LTTNG_SLE_KERNEL_RANGE(4,4,82,6,0,0, 4,4,82,7,0,0) || \
- LTTNG_SLE_KERNEL_RANGE(4,4,92,6,0,0, 4,4,92,7,0,0) || \
-@@ -494,7 +548,41 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_put,
-
- TP_ARGS(inode, ordered)
- )
-+#else
-+LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_add,
-+
-+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered)
-+)
-+
-+LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_remove,
-+
-+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered)
-+)
-+
-+LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_start,
-+
-+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered)
-+)
-
-+LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_put,
-+
-+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
-+
-+ TP_ARGS(inode, ordered)
-+)
-+#endif
-+
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) || \
-+ LTTNG_SLE_KERNEL_RANGE(4,4,73,5,0,0, 4,4,73,6,0,0) || \
-+ LTTNG_SLE_KERNEL_RANGE(4,4,82,6,0,0, 4,4,82,7,0,0) || \
-+ LTTNG_SLE_KERNEL_RANGE(4,4,92,6,0,0, 4,4,92,7,0,0) || \
-+ LTTNG_SLE_KERNEL_RANGE(4,4,103,6,0,0, 4,5,0,0,0,0))
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__writepage,
-
- TP_PROTO(const struct page *page, const struct inode *inode,
-@@ -563,34 +651,6 @@ LTTNG_TRACEPOINT_EVENT(btrfs_sync_file,
- )
- )
- #else
--LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_add,
--
-- TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
--
-- TP_ARGS(inode, ordered)
--)
--
--LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_remove,
--
-- TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
--
-- TP_ARGS(inode, ordered)
--)
--
--LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_start,
--
-- TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
--
-- TP_ARGS(inode, ordered)
--)
--
--LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__ordered_extent, btrfs_ordered_extent_put,
--
-- TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
--
-- TP_ARGS(inode, ordered)
--)
--
- LTTNG_TRACEPOINT_EVENT_CLASS(btrfs__writepage,
-
- TP_PROTO(struct page *page, struct inode *inode,
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0005-fix-ext4-fast-commit-recovery-path-v5.10.patch b/meta/recipes-kernel/lttng/lttng-modules/0005-fix-ext4-fast-commit-recovery-path-v5.10.patch
deleted file mode 100644
index 63d97fa4a3..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0005-fix-ext4-fast-commit-recovery-path-v5.10.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From b96f5364ba4d5a8b9e8159fe0b9e20d598a1c0f5 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 26 Oct 2020 17:03:23 -0400
-Subject: [PATCH 05/16] fix: ext4: fast commit recovery path (v5.10)
-
-See upstream commit :
-
- commit 8016e29f4362e285f0f7e38fadc61a5b7bdfdfa2
- Author: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
- Date: Thu Oct 15 13:37:59 2020 -0700
-
- ext4: fast commit recovery path
-
- This patch adds fast commit recovery path support for Ext4 file
- system. We add several helper functions that are similar in spirit to
- e2fsprogs journal recovery path handlers. Example of such functions
- include - a simple block allocator, idempotent block bitmap update
- function etc. Using these routines and the fast commit log in the fast
- commit area, the recovery path (ext4_fc_replay()) performs fast commit
- log recovery.
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: Ia65cf44e108f2df0b458f0d335f33a8f18f50baa
----
- instrumentation/events/lttng-module/ext4.h | 40 ++++++++++++++++++++++
- 1 file changed, 40 insertions(+)
-
-diff --git a/instrumentation/events/lttng-module/ext4.h b/instrumentation/events/lttng-module/ext4.h
-index f9a55e29..5fddccad 100644
---- a/instrumentation/events/lttng-module/ext4.h
-+++ b/instrumentation/events/lttng-module/ext4.h
-@@ -1423,6 +1423,18 @@ LTTNG_TRACEPOINT_EVENT(ext4_ext_load_extent,
- )
- )
-
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+LTTNG_TRACEPOINT_EVENT(ext4_load_inode,
-+ TP_PROTO(struct super_block *sb, unsigned long ino),
-+
-+ TP_ARGS(sb, ino),
-+
-+ TP_FIELDS(
-+ ctf_integer(dev_t, dev, sb->s_dev)
-+ ctf_integer(ino_t, ino, ino)
-+ )
-+)
-+#else
- LTTNG_TRACEPOINT_EVENT(ext4_load_inode,
- TP_PROTO(struct inode *inode),
-
-@@ -2045,6 +2057,34 @@ LTTNG_TRACEPOINT_EVENT(ext4_es_shrink_exit,
-
- #endif
-
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+LTTNG_TRACEPOINT_EVENT(ext4_fc_replay_scan,
-+ TP_PROTO(struct super_block *sb, int error, int off),
-+
-+ TP_ARGS(sb, error, off),
-+
-+ TP_FIELDS(
-+ ctf_integer(dev_t, dev, sb->s_dev)
-+ ctf_integer(int, error, error)
-+ ctf_integer(int, off, off)
-+ )
-+)
-+
-+LTTNG_TRACEPOINT_EVENT(ext4_fc_replay,
-+ TP_PROTO(struct super_block *sb, int tag, int ino, int priv1, int priv2),
-+
-+ TP_ARGS(sb, tag, ino, priv1, priv2),
-+
-+ TP_FIELDS(
-+ ctf_integer(dev_t, dev, sb->s_dev)
-+ ctf_integer(int, tag, tag)
-+ ctf_integer(int, ino, ino)
-+ ctf_integer(int, priv1, priv1)
-+ ctf_integer(int, priv2, priv2)
-+ )
-+)
-+#endif
-+
- #endif /* LTTNG_TRACE_EXT4_H */
-
- /* This part must be outside protection */
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0006-fix-KVM-x86-Add-intr-vectoring-info-and-error-code-t.patch b/meta/recipes-kernel/lttng/lttng-modules/0006-fix-KVM-x86-Add-intr-vectoring-info-and-error-code-t.patch
deleted file mode 100644
index 56c563cea3..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0006-fix-KVM-x86-Add-intr-vectoring-info-and-error-code-t.patch
+++ /dev/null
@@ -1,124 +0,0 @@
-From a6334775b763c187d84914e89a0b835a793ae0fd Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 26 Oct 2020 14:11:17 -0400
-Subject: [PATCH 06/16] fix: KVM: x86: Add intr/vectoring info and error code
- to kvm_exit tracepoint (v5.10)
-
-See upstream commit :
-
- commit 235ba74f008d2e0936b29f77f68d4e2f73ffd24a
- Author: Sean Christopherson <sean.j.christopherson@intel.com>
- Date: Wed Sep 23 13:13:46 2020 -0700
-
- KVM: x86: Add intr/vectoring info and error code to kvm_exit tracepoint
-
- Extend the kvm_exit tracepoint to align it with kvm_nested_vmexit in
- terms of what information is captured. On SVM, add interrupt info and
- error code, while on VMX it add IDT vectoring and error code. This
- sets the stage for macrofying the kvm_exit tracepoint definition so that
- it can be reused for kvm_nested_vmexit without loss of information.
-
- Opportunistically stuff a zero for VM_EXIT_INTR_INFO if the VM-Enter
- failed, as the field is guaranteed to be invalid. Note, it'd be
- possible to further filter the interrupt/exception fields based on the
- VM-Exit reason, but the helper is intended only for tracepoints, i.e.
- an extra VMREAD or two is a non-issue, the failed VM-Enter case is just
- low hanging fruit.
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: I638fa29ef7d8bb432de42a33f9ae4db43259b915
----
- .../events/lttng-module/arch/x86/kvm/trace.h | 55 ++++++++++++++++++-
- 1 file changed, 53 insertions(+), 2 deletions(-)
-
-diff --git a/instrumentation/events/lttng-module/arch/x86/kvm/trace.h b/instrumentation/events/lttng-module/arch/x86/kvm/trace.h
-index 4416ae02..0917b51f 100644
---- a/instrumentation/events/lttng-module/arch/x86/kvm/trace.h
-+++ b/instrumentation/events/lttng-module/arch/x86/kvm/trace.h
-@@ -115,6 +115,37 @@ LTTNG_TRACEPOINT_EVENT_MAP(kvm_apic, kvm_x86_apic,
- /*
- * Tracepoint for kvm guest exit:
- */
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
-+ TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
-+ TP_ARGS(exit_reason, vcpu, isa),
-+
-+ TP_locvar(
-+ u64 info1, info2;
-+ u32 intr_info, error_code;
-+ ),
-+
-+ TP_code_pre(
-+ kvm_x86_ops.get_exit_info(vcpu, &tp_locvar->info1,
-+ &tp_locvar->info2,
-+ &tp_locvar->intr_info,
-+ &tp_locvar->error_code);
-+ ),
-+
-+ TP_FIELDS(
-+ ctf_integer(unsigned int, exit_reason, exit_reason)
-+ ctf_integer(unsigned long, guest_rip, kvm_rip_read(vcpu))
-+ ctf_integer(u32, isa, isa)
-+ ctf_integer(u64, info1, tp_locvar->info1)
-+ ctf_integer(u64, info2, tp_locvar->info2)
-+ ctf_integer(u32, intr_info, tp_locvar->intr_info)
-+ ctf_integer(u32, error_code, tp_locvar->error_code)
-+ ctf_integer(unsigned int, vcpu_id, vcpu->vcpu_id)
-+ ),
-+
-+ TP_code_post()
-+)
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
- LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
- TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
- TP_ARGS(exit_reason, vcpu, isa),
-@@ -124,13 +155,32 @@ LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
- ),
-
- TP_code_pre(
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
- kvm_x86_ops.get_exit_info(vcpu, &tp_locvar->info1,
- &tp_locvar->info2);
-+ ),
-+
-+ TP_FIELDS(
-+ ctf_integer(unsigned int, exit_reason, exit_reason)
-+ ctf_integer(unsigned long, guest_rip, kvm_rip_read(vcpu))
-+ ctf_integer(u32, isa, isa)
-+ ctf_integer(u64, info1, tp_locvar->info1)
-+ ctf_integer(u64, info2, tp_locvar->info2)
-+ ),
-+
-+ TP_code_post()
-+)
- #else
-+LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
-+ TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
-+ TP_ARGS(exit_reason, vcpu, isa),
-+
-+ TP_locvar(
-+ u64 info1, info2;
-+ ),
-+
-+ TP_code_pre(
- kvm_x86_ops->get_exit_info(vcpu, &tp_locvar->info1,
- &tp_locvar->info2);
--#endif
- ),
-
- TP_FIELDS(
-@@ -143,6 +193,7 @@ LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
-
- TP_code_post()
- )
-+#endif
-
- /*
- * Tracepoint for kvm interrupt injection:
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0007-fix-kvm-x86-mmu-Add-TDP-MMU-PF-handler-v5.10.patch b/meta/recipes-kernel/lttng/lttng-modules/0007-fix-kvm-x86-mmu-Add-TDP-MMU-PF-handler-v5.10.patch
deleted file mode 100644
index d78a8c25c7..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0007-fix-kvm-x86-mmu-Add-TDP-MMU-PF-handler-v5.10.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From 2f421c43c60b2c9d3ed63c1a363320e98a536a35 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 26 Oct 2020 14:28:35 -0400
-Subject: [PATCH 07/16] fix: kvm: x86/mmu: Add TDP MMU PF handler (v5.10)
-
-See upstream commit :
-
- commit bb18842e21111a979e2e0e1c5d85c09646f18d51
- Author: Ben Gardon <bgardon@google.com>
- Date: Wed Oct 14 11:26:50 2020 -0700
-
- kvm: x86/mmu: Add TDP MMU PF handler
-
- Add functions to handle page faults in the TDP MMU. These page faults
- are currently handled in much the same way as the x86 shadow paging
- based MMU, however the ordering of some operations is slightly
- different. Future patches will add eager NX splitting, a fast page fault
- handler, and parallel page faults.
-
- Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
- machine. This series introduced no new failures.
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: Ie56959cb6c77913d2f1188b0ca15da9114623a4e
----
- .../lttng-module/arch/x86/kvm/mmutrace.h | 20 ++++++++++++++++++-
- probes/lttng-probe-kvm-x86-mmu.c | 5 +++++
- 2 files changed, 24 insertions(+), 1 deletion(-)
-
-diff --git a/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h b/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
-index e5470400..86717835 100644
---- a/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
-+++ b/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
-@@ -163,7 +163,25 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
- TP_ARGS(sp)
- )
-
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+
-+LTTNG_TRACEPOINT_EVENT_MAP(
-+ mark_mmio_spte,
-+
-+ kvm_mmu_mark_mmio_spte,
-+
-+ TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
-+ TP_ARGS(sptep, gfn, spte),
-+
-+ TP_FIELDS(
-+ ctf_integer_hex(void *, sptep, sptep)
-+ ctf_integer(gfn_t, gfn, gfn)
-+ ctf_integer(unsigned, access, spte & ACC_ALL)
-+ ctf_integer(unsigned int, gen, get_mmio_spte_generation(spte))
-+ )
-+)
-+
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
-
- LTTNG_TRACEPOINT_EVENT_MAP(
- mark_mmio_spte,
-diff --git a/probes/lttng-probe-kvm-x86-mmu.c b/probes/lttng-probe-kvm-x86-mmu.c
-index 8f981865..5043c776 100644
---- a/probes/lttng-probe-kvm-x86-mmu.c
-+++ b/probes/lttng-probe-kvm-x86-mmu.c
-@@ -31,6 +31,11 @@
- #include <../../arch/x86/kvm/mmutrace.h>
- #endif
-
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+#include <../arch/x86/kvm/mmu.h>
-+#include <../arch/x86/kvm/mmu/spte.h>
-+#endif
-+
- #undef TRACE_INCLUDE_PATH
- #undef TRACE_INCLUDE_FILE
-
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0008-fix-KVM-x86-mmu-Return-unique-RET_PF_-values-if-the-.patch b/meta/recipes-kernel/lttng/lttng-modules/0008-fix-KVM-x86-mmu-Return-unique-RET_PF_-values-if-the-.patch
deleted file mode 100644
index a71bb728f0..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0008-fix-KVM-x86-mmu-Return-unique-RET_PF_-values-if-the-.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-From 14bbccffa579f4d66e2900843d6afae1294ce7c8 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 26 Oct 2020 17:07:13 -0400
-Subject: [PATCH 08/16] fix: KVM: x86/mmu: Return unique RET_PF_* values if the
- fault was fixed (v5.10)
-
-See upstream commit :
-
- commit c4371c2a682e0da1ed2cd7e3c5496f055d873554
- Author: Sean Christopherson <sean.j.christopherson@intel.com>
- Date: Wed Sep 23 15:04:24 2020 -0700
-
- KVM: x86/mmu: Return unique RET_PF_* values if the fault was fixed
-
- Introduce RET_PF_FIXED and RET_PF_SPURIOUS to provide unique return
- values instead of overloading RET_PF_RETRY. In the short term, the
- unique values add clarity to the code and RET_PF_SPURIOUS will be used
- by set_spte() to avoid unnecessary work for spurious faults.
-
- In the long term, TDX will use RET_PF_FIXED to deterministically map
- memory during pre-boot. The page fault flow may bail early for benign
- reasons, e.g. if the mmu_notifier fires for an unrelated address. With
- only RET_PF_RETRY, it's impossible for the caller to distinguish between
- "cool, page is mapped" and "darn, need to try again", and thus cannot
- handle benign cases like the mmu_notifier retry.
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: Ie0855c78852b45f588e131fe2463e15aae1bc023
----
- .../lttng-module/arch/x86/kvm/mmutrace.h | 22 ++++++++++++++++++-
- 1 file changed, 21 insertions(+), 1 deletion(-)
-
-diff --git a/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h b/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
-index 86717835..cdf0609f 100644
---- a/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
-+++ b/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
-@@ -233,7 +233,27 @@ LTTNG_TRACEPOINT_EVENT_MAP(
- )
- )
-
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || \
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+LTTNG_TRACEPOINT_EVENT_MAP(
-+ fast_page_fault,
-+
-+ kvm_mmu_fast_page_fault,
-+
-+ TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
-+ u64 *sptep, u64 old_spte, int ret),
-+ TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),
-+
-+ TP_FIELDS(
-+ ctf_integer(int, vcpu_id, vcpu->vcpu_id)
-+ ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
-+ ctf_integer(u32, error_code, error_code)
-+ ctf_integer_hex(u64 *, sptep, sptep)
-+ ctf_integer(u64, old_spte, old_spte)
-+ ctf_integer(u64, new_spte, *sptep)
-+ ctf_integer(int, ret, ret)
-+ )
-+)
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) || \
- LTTNG_KERNEL_RANGE(4,19,103, 4,20,0) || \
- LTTNG_KERNEL_RANGE(5,4,19, 5,5,0) || \
- LTTNG_KERNEL_RANGE(5,5,3, 5,6,0) || \
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0009-fix-tracepoint-Optimize-using-static_call-v5.10.patch b/meta/recipes-kernel/lttng/lttng-modules/0009-fix-tracepoint-Optimize-using-static_call-v5.10.patch
deleted file mode 100644
index b942aa5c95..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0009-fix-tracepoint-Optimize-using-static_call-v5.10.patch
+++ /dev/null
@@ -1,155 +0,0 @@
-From c6b31b349fe901a8f586a66064f9e9b15449ac1c Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 26 Oct 2020 17:09:05 -0400
-Subject: [PATCH 09/16] fix: tracepoint: Optimize using static_call() (v5.10)
-
-See upstream commit :
-
- commit d25e37d89dd2f41d7acae0429039d2f0ae8b4a07
- Author: Steven Rostedt (VMware) <rostedt@goodmis.org>
- Date: Tue Aug 18 15:57:52 2020 +0200
-
- tracepoint: Optimize using static_call()
-
- Currently the tracepoint site will iterate a vector and issue indirect
- calls to however many handlers are registered (ie. the vector is
- long).
-
- Using static_call() it is possible to optimize this for the common
- case of only having a single handler registered. In this case the
- static_call() can directly call this handler. Otherwise, if the vector
- is longer than 1, call a function that iterates the whole vector like
- the current code.
-
-Upstream-Status: Backport
-
-Change-Id: I739dd84d62cc1a821b8bd8acff74fa29aa25d22f
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- lttng-statedump-impl.c | 44 ++++++++++++++++++++++++++++++++-------
- probes/lttng.c | 7 +++++--
- tests/probes/lttng-test.c | 7 ++++++-
- wrapper/tracepoint.h | 8 +++++++
- 4 files changed, 56 insertions(+), 10 deletions(-)
-
-diff --git a/lttng-statedump-impl.c b/lttng-statedump-impl.c
-index 54a309d1..e0b19b42 100644
---- a/lttng-statedump-impl.c
-+++ b/lttng-statedump-impl.c
-@@ -55,13 +55,43 @@
- #define LTTNG_INSTRUMENTATION
- #include <instrumentation/events/lttng-module/lttng-statedump.h>
-
--DEFINE_TRACE(lttng_statedump_block_device);
--DEFINE_TRACE(lttng_statedump_end);
--DEFINE_TRACE(lttng_statedump_interrupt);
--DEFINE_TRACE(lttng_statedump_file_descriptor);
--DEFINE_TRACE(lttng_statedump_start);
--DEFINE_TRACE(lttng_statedump_process_state);
--DEFINE_TRACE(lttng_statedump_network_interface);
-+LTTNG_DEFINE_TRACE(lttng_statedump_block_device,
-+ TP_PROTO(struct lttng_session *session,
-+ dev_t dev, const char *diskname),
-+ TP_ARGS(session, dev, diskname));
-+
-+LTTNG_DEFINE_TRACE(lttng_statedump_end,
-+ TP_PROTO(struct lttng_session *session),
-+ TP_ARGS(session));
-+
-+LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
-+ TP_PROTO(struct lttng_session *session,
-+ unsigned int irq, const char *chip_name,
-+ struct irqaction *action),
-+ TP_ARGS(session, irq, chip_name, action));
-+
-+LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
-+ TP_PROTO(struct lttng_session *session,
-+ struct files_struct *files,
-+ int fd, const char *filename,
-+ unsigned int flags, fmode_t fmode),
-+ TP_ARGS(session, files, fd, filename, flags, fmode));
-+
-+LTTNG_DEFINE_TRACE(lttng_statedump_start,
-+ TP_PROTO(struct lttng_session *session),
-+ TP_ARGS(session));
-+
-+LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
-+ TP_PROTO(struct lttng_session *session,
-+ struct task_struct *p,
-+ int type, int mode, int submode, int status,
-+ struct files_struct *files),
-+ TP_ARGS(session, p, type, mode, submode, status, files));
-+
-+LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
-+ TP_PROTO(struct lttng_session *session,
-+ struct net_device *dev, struct in_ifaddr *ifa),
-+ TP_ARGS(session, dev, ifa));
-
- struct lttng_fd_ctx {
- char *page;
-diff --git a/probes/lttng.c b/probes/lttng.c
-index 05bc1388..7ddaa69f 100644
---- a/probes/lttng.c
-+++ b/probes/lttng.c
-@@ -8,7 +8,7 @@
- */
-
- #include <linux/module.h>
--#include <linux/tracepoint.h>
-+#include <wrapper/tracepoint.h>
- #include <linux/uaccess.h>
- #include <linux/gfp.h>
- #include <linux/fs.h>
-@@ -32,7 +32,10 @@
- #define LTTNG_LOGGER_COUNT_MAX 1024
- #define LTTNG_LOGGER_FILE "lttng-logger"
-
--DEFINE_TRACE(lttng_logger);
-+LTTNG_DEFINE_TRACE(lttng_logger,
-+ PARAMS(const char __user *text, size_t len),
-+ PARAMS(text, len)
-+);
-
- static struct proc_dir_entry *lttng_logger_dentry;
-
-diff --git a/tests/probes/lttng-test.c b/tests/probes/lttng-test.c
-index c728bed5..8f2d3feb 100644
---- a/tests/probes/lttng-test.c
-+++ b/tests/probes/lttng-test.c
-@@ -26,7 +26,12 @@
- #define LTTNG_INSTRUMENTATION
- #include <instrumentation/events/lttng-module/lttng-test.h>
-
--DEFINE_TRACE(lttng_test_filter_event);
-+LTTNG_DEFINE_TRACE(lttng_test_filter_event,
-+ PARAMS(int anint, int netint, long *values,
-+ char *text, size_t textlen,
-+ char *etext, uint32_t * net_values),
-+ PARAMS(anint, netint, values, text, textlen, etext, net_values)
-+);
-
- #define LTTNG_TEST_FILTER_EVENT_FILE "lttng-test-filter-event"
-
-diff --git a/wrapper/tracepoint.h b/wrapper/tracepoint.h
-index 3883e11a..758038b6 100644
---- a/wrapper/tracepoint.h
-+++ b/wrapper/tracepoint.h
-@@ -20,6 +20,14 @@
-
- #endif
-
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
-+#define LTTNG_DEFINE_TRACE(name, proto, args) \
-+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
-+#else
-+#define LTTNG_DEFINE_TRACE(name, proto, args) \
-+ DEFINE_TRACE(name)
-+#endif
-+
- #ifndef HAVE_KABI_2635_TRACEPOINT
-
- #define kabi_2635_tracepoint_probe_register tracepoint_probe_register
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0010-fix-include-order-for-older-kernels.patch b/meta/recipes-kernel/lttng/lttng-modules/0010-fix-include-order-for-older-kernels.patch
deleted file mode 100644
index 250e9c6261..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0010-fix-include-order-for-older-kernels.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From 2ce89d35c9477d8c17c00489c72e1548e16af9b9 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Fri, 20 Nov 2020 11:42:30 -0500
-Subject: [PATCH 10/16] fix: include order for older kernels
-
-Fixes a build failure on v3.0 and v3.1.
-
-Upstream-Status: Backport
-
-Change-Id: Ic48512d2aa5ee46678e67d147b92dba6d0959615
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- lttng-events.h | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/lttng-events.h b/lttng-events.h
-index 099fd78b..f5cc57c6 100644
---- a/lttng-events.h
-+++ b/lttng-events.h
-@@ -16,6 +16,7 @@
- #include <linux/kref.h>
- #include <lttng-cpuhotplug.h>
- #include <linux/uuid.h>
-+#include <linux/irq_work.h>
- #include <wrapper/uprobes.h>
- #include <lttng-tracer.h>
- #include <lttng-abi.h>
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0011-Add-release-maintainer-script.patch b/meta/recipes-kernel/lttng/lttng-modules/0011-Add-release-maintainer-script.patch
deleted file mode 100644
index d25d64b9de..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0011-Add-release-maintainer-script.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From 22ffa48439e617a32556365e00827fba062c5688 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 23 Nov 2020 10:49:57 -0500
-Subject: [PATCH 11/16] Add release maintainer script
-
-Upstream-Status: Backport
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- scripts/maintainer/do-release.sh | 37 ++++++++++++++++++++++++++++++++
- 1 file changed, 37 insertions(+)
- create mode 100755 scripts/maintainer/do-release.sh
-
-diff --git a/scripts/maintainer/do-release.sh b/scripts/maintainer/do-release.sh
-new file mode 100755
-index 00000000..e0cec167
---- /dev/null
-+++ b/scripts/maintainer/do-release.sh
-@@ -0,0 +1,37 @@
-+#!/bin/sh
-+
-+# invoke with do-release 2.N.M, or 2.N.M-rcXX
-+
-+REL=$1
-+SRCDIR=~/git/lttng-modules
-+# The output files are created in ${HOME}/stable/
-+OUTPUTDIR=${HOME}/stable
-+
-+if [ x"$1" = x"" ]; then
-+ echo "1 arg : VERSION";
-+ exit 1;
-+fi
-+
-+cd ${OUTPUTDIR}
-+
-+echo Doing LTTng modules release ${REL}
-+
-+mkdir lttng-modules-${REL}
-+cd lttng-modules-${REL}
-+cp -ax ${SRCDIR}/. .
-+
-+#cleanup
-+make clean
-+git clean -xdf
-+
-+for a in \*.orig \*.rej Module.markers Module.symvers; do
-+ find . -name "${a}" -exec rm '{}' \;;
-+done
-+for a in outgoing .tmp_versions .git .pc; do
-+ find . -name "${a}" -exec rm -rf '{}' \;;
-+done
-+
-+cd ..
-+tar cvfj lttng-modules-${REL}.tar.bz2 lttng-modules-${REL}
-+mksums lttng-modules-${REL}.tar.bz2
-+signpkg lttng-modules-${REL}.tar.bz2
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0012-Improve-the-release-script.patch b/meta/recipes-kernel/lttng/lttng-modules/0012-Improve-the-release-script.patch
deleted file mode 100644
index f5e7fb55a2..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0012-Improve-the-release-script.patch
+++ /dev/null
@@ -1,173 +0,0 @@
-From a241d30fa82ed0be1026f14e36e8bd2b0e65740d Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 23 Nov 2020 12:15:43 -0500
-Subject: [PATCH 12/16] Improve the release script
-
- * Use git-archive, this removes all custom code to cleanup the repo, it
- can now be used in an unclean repo as the code will be exported from
- a specific tag.
- * Add parameters, this will allow using the script on any machine
- while keeping the default behavior for the maintainer.
-
-Upstream-Status: Backport
-
-Change-Id: I9f29d0e1afdbf475d0bbaeb9946ca3216f725e86
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- .gitattributes | 3 +
- scripts/maintainer/do-release.sh | 121 +++++++++++++++++++++++++------
- 2 files changed, 100 insertions(+), 24 deletions(-)
- create mode 100644 .gitattributes
-
-diff --git a/.gitattributes b/.gitattributes
-new file mode 100644
-index 00000000..7839355a
---- /dev/null
-+++ b/.gitattributes
-@@ -0,0 +1,3 @@
-+.gitattributes export-ignore
-+.gitignore export-ignore
-+.gitreview export-ignore
-diff --git a/scripts/maintainer/do-release.sh b/scripts/maintainer/do-release.sh
-index e0cec167..5e94e136 100755
---- a/scripts/maintainer/do-release.sh
-+++ b/scripts/maintainer/do-release.sh
-@@ -1,37 +1,110 @@
--#!/bin/sh
-+#!/bin/bash
-+
-+set -eu
-+set -o pipefail
-
- # invoke with do-release 2.N.M, or 2.N.M-rcXX
-
--REL=$1
--SRCDIR=~/git/lttng-modules
-+# Default maintainer values
-+SRCDIR="${HOME}/git/lttng-modules"
- # The output files are created in ${HOME}/stable/
--OUTPUTDIR=${HOME}/stable
-+OUTPUTDIR="${HOME}/stable"
-+SIGN="yes"
-+VERBOSE=""
-+
-+usage() {
-+ echo "Usage: do-release.sh [OPTION]... RELEASE"
-+ echo
-+ echo "Mandatory arguments to long options are mandatory for short options too."
-+ echo " -s, --srcdir DIR source directory"
-+ echo " -o, --outputdir DIR output directory, must exist"
-+ echo " -n, --no-sign don't GPG sign the output archive"
-+ echo " -v, --verbose verbose command output"
-+}
-+
-+POS_ARGS=()
-+while [[ $# -gt 0 ]]
-+do
-+ arg="$1"
-+
-+ case $arg in
-+ -n|--no-sign)
-+ SIGN="no"
-+ shift 1
-+ ;;
-+
-+ -s|--srcdir)
-+ SRCDIR="$2"
-+ shift 2
-+ ;;
-+
-+ -o|--outputdir)
-+ OUTPUTDIR="$2"
-+ shift 2
-+ ;;
-+
-+ -v|--verbose)
-+ VERBOSE="-v"
-+ shift 1
-+ ;;
-+
-+ # Catch unknown arguments
-+ -*)
-+ usage
-+ exit 1
-+ ;;
-+
-+ *)
-+ POS_ARGS+=("$1")
-+ shift
-+ ;;
-+ esac
-+done
-+set -- "${POS_ARGS[@]}"
-
--if [ x"$1" = x"" ]; then
-- echo "1 arg : VERSION";
-+REL=${1:-}
-+
-+if [ x"${REL}" = x"" ]; then
-+ usage
- exit 1;
- fi
-
--cd ${OUTPUTDIR}
-+echo "Doing LTTng modules release ${REL}"
-+echo " Source dir: ${SRCDIR}"
-+echo " Output dir: ${OUTPUTDIR}"
-+echo " GPG sign: ${SIGN}"
-
--echo Doing LTTng modules release ${REL}
-+# Make sure the output directory exists
-+if [ ! -d "${OUTPUTDIR}" ]; then
-+ echo "Output directory '${OUTPUTDIR}' doesn't exist."
-+ exit 1
-+fi
-
--mkdir lttng-modules-${REL}
--cd lttng-modules-${REL}
--cp -ax ${SRCDIR}/. .
-+# Make sure the source directory is a git repository
-+if [ ! -r "${SRCDIR}/.git/config" ]; then
-+ echo "Source directory '${SRCDIR}' isn't a git repository."
-+ exit 1
-+fi
-
--#cleanup
--make clean
--git clean -xdf
-+# Set the git repo directory for all further git commands
-+export GIT_DIR="${SRCDIR}/.git/"
-
--for a in \*.orig \*.rej Module.markers Module.symvers; do
-- find . -name "${a}" -exec rm '{}' \;;
--done
--for a in outgoing .tmp_versions .git .pc; do
-- find . -name "${a}" -exec rm -rf '{}' \;;
--done
-+# Check if the release tag exists
-+if ! git rev-parse "refs/tags/v${REL}" >/dev/null 2>&1; then
-+ echo "Release tag 'v${REL}' doesn't exist."
-+ exit 1
-+fi
-+
-+# Generate the compressed tar archive, the git attributes from the tag will be used.
-+git archive $VERBOSE --format=tar --prefix="lttng-modules-${REL}/" "v${REL}" | bzip2 > "${OUTPUTDIR}/lttng-modules-${REL}.tar.bz2"
-
--cd ..
--tar cvfj lttng-modules-${REL}.tar.bz2 lttng-modules-${REL}
--mksums lttng-modules-${REL}.tar.bz2
--signpkg lttng-modules-${REL}.tar.bz2
-+pushd "${OUTPUTDIR}" >/dev/null
-+# Generate the hashes
-+md5sum "lttng-modules-${REL}.tar.bz2" > "lttng-modules-${REL}.tar.bz2.md5"
-+sha256sum "lttng-modules-${REL}.tar.bz2" > "lttng-modules-${REL}.tar.bz2.sha256"
-+
-+if [ "x${SIGN}" = "xyes" ]; then
-+ # Sign with the default key
-+ gpg --armor -b "lttng-modules-${REL}.tar.bz2"
-+fi
-+popd >/dev/null
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0013-fix-backport-of-fix-ext4-fast-commit-recovery-path-v.patch b/meta/recipes-kernel/lttng/lttng-modules/0013-fix-backport-of-fix-ext4-fast-commit-recovery-path-v.patch
deleted file mode 100644
index f6288923e1..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0013-fix-backport-of-fix-ext4-fast-commit-recovery-path-v.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 59fcc704bea8ecf4bd401e744df41e3331359524 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Mon, 23 Nov 2020 10:19:52 -0500
-Subject: [PATCH 13/16] fix: backport of fix: ext4: fast commit recovery path
- (v5.10)
-
-Add missing '#endif'.
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: I43349d685d7ed740b32ce992be0c2e7e6f12c799
----
- instrumentation/events/lttng-module/ext4.h | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/instrumentation/events/lttng-module/ext4.h b/instrumentation/events/lttng-module/ext4.h
-index 5fddccad..d454fa6e 100644
---- a/instrumentation/events/lttng-module/ext4.h
-+++ b/instrumentation/events/lttng-module/ext4.h
-@@ -1446,6 +1446,7 @@ LTTNG_TRACEPOINT_EVENT(ext4_load_inode,
- )
- )
- #endif
-+#endif
-
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0))
-
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0014-Revert-fix-include-order-for-older-kernels.patch b/meta/recipes-kernel/lttng/lttng-modules/0014-Revert-fix-include-order-for-older-kernels.patch
deleted file mode 100644
index 446391a832..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0014-Revert-fix-include-order-for-older-kernels.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From b2df75dd378ce5260bb51872e43ac1d76fbf4588 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 23 Nov 2020 14:21:51 -0500
-Subject: [PATCH 14/16] Revert "fix: include order for older kernels"
-
-This reverts commit 2ce89d35c9477d8c17c00489c72e1548e16af9b9.
-
-This commit is only needed for master and stable-2.12, because
-stable-2.11 does not include irq_work.h.
-
-Upstream-Status: Backport
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- lttng-events.h | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/lttng-events.h b/lttng-events.h
-index f5cc57c6..099fd78b 100644
---- a/lttng-events.h
-+++ b/lttng-events.h
-@@ -16,7 +16,6 @@
- #include <linux/kref.h>
- #include <lttng-cpuhotplug.h>
- #include <linux/uuid.h>
--#include <linux/irq_work.h>
- #include <wrapper/uprobes.h>
- #include <lttng-tracer.h>
- #include <lttng-abi.h>
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0015-fix-backport-of-fix-tracepoint-Optimize-using-static.patch b/meta/recipes-kernel/lttng/lttng-modules/0015-fix-backport-of-fix-tracepoint-Optimize-using-static.patch
deleted file mode 100644
index 1ff10d48da..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0015-fix-backport-of-fix-tracepoint-Optimize-using-static.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From f8922333020aaa267e17fb23180b56c4c16ebe9e Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Tue, 24 Nov 2020 11:11:42 -0500
-Subject: [PATCH 15/16] fix: backport of fix: tracepoint: Optimize using
- static_call() (v5.10)
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: I94f2b845f11654e639f03254185980de527a4ca8
----
- lttng-statedump-impl.c | 9 ++++-----
- 1 file changed, 4 insertions(+), 5 deletions(-)
-
-diff --git a/lttng-statedump-impl.c b/lttng-statedump-impl.c
-index e0b19b42..a8c32db5 100644
---- a/lttng-statedump-impl.c
-+++ b/lttng-statedump-impl.c
-@@ -72,10 +72,9 @@ LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
-
- LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
- TP_PROTO(struct lttng_session *session,
-- struct files_struct *files,
-- int fd, const char *filename,
-+ struct task_struct *p, int fd, const char *filename,
- unsigned int flags, fmode_t fmode),
-- TP_ARGS(session, files, fd, filename, flags, fmode));
-+ TP_ARGS(session, p, fd, filename, flags, fmode));
-
- LTTNG_DEFINE_TRACE(lttng_statedump_start,
- TP_PROTO(struct lttng_session *session),
-@@ -85,8 +84,8 @@ LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
- TP_PROTO(struct lttng_session *session,
- struct task_struct *p,
- int type, int mode, int submode, int status,
-- struct files_struct *files),
-- TP_ARGS(session, p, type, mode, submode, status, files));
-+ struct pid_namespace *pid_ns),
-+ TP_ARGS(session, p, type, mode, submode, status, pid_ns));
-
- LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
- TP_PROTO(struct lttng_session *session,
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0016-fix-adjust-version-range-for-trace_find_free_extent.patch b/meta/recipes-kernel/lttng/lttng-modules/0016-fix-adjust-version-range-for-trace_find_free_extent.patch
deleted file mode 100644
index 59d4d7afa7..0000000000
--- a/meta/recipes-kernel/lttng/lttng-modules/0016-fix-adjust-version-range-for-trace_find_free_extent.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 5c3e67d7994097cc75f45258b7518aacb55dde1b Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Tue, 24 Nov 2020 11:27:18 -0500
-Subject: [PATCH 16/16] fix: adjust version range for trace_find_free_extent()
-
-Upstream-Status: Backport
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Change-Id: Iaa6088092cf58b4d29d55f3ff9586c57ae272302
----
- instrumentation/events/lttng-module/btrfs.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/instrumentation/events/lttng-module/btrfs.h b/instrumentation/events/lttng-module/btrfs.h
-index d47f3280..efe7af96 100644
---- a/instrumentation/events/lttng-module/btrfs.h
-+++ b/instrumentation/events/lttng-module/btrfs.h
-@@ -1917,7 +1917,7 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(btrfs__reserved_extent, btrfs_reserved_extent_f
- #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0) || \
-- LTTNG_KERNEL_RANGE(5,9,6, 5,10,0) || \
-+ LTTNG_KERNEL_RANGE(5,9,5, 5,10,0) || \
- LTTNG_KERNEL_RANGE(5,4,78, 5,5,0))
- LTTNG_TRACEPOINT_EVENT_MAP(find_free_extent,
-
---
-2.25.1
-
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0017-fix-random-remove-unused-tracepoints-v5.18.patch b/meta/recipes-kernel/lttng/lttng-modules/0017-fix-random-remove-unused-tracepoints-v5.18.patch
new file mode 100644
index 0000000000..3fc7fd733d
--- /dev/null
+++ b/meta/recipes-kernel/lttng/lttng-modules/0017-fix-random-remove-unused-tracepoints-v5.18.patch
@@ -0,0 +1,46 @@
+From 25b70c486bb96de0caf7cea1da42ed07801cca84 Mon Sep 17 00:00:00 2001
+From: Michael Jeanson <mjeanson@efficios.com>
+Date: Mon, 4 Apr 2022 14:33:42 -0400
+Subject: [PATCH 17/19] fix: random: remove unused tracepoints (v5.18)
+
+See upstream commit :
+
+ commit 14c174633f349cb41ea90c2c0aaddac157012f74
+ Author: Jason A. Donenfeld <Jason@zx2c4.com>
+ Date: Thu Feb 10 16:40:44 2022 +0100
+
+ random: remove unused tracepoints
+
+ These explicit tracepoints aren't really used and show sign of aging.
+ It's work to keep these up to date, and before I attempted to keep them
+ up to date, they weren't up to date, which indicates that they're not
+ really used. These days there are better ways of introspecting anyway.
+
+Upstream-Status: Backport [369d82bb1746447514c877088d7c5fd0f39140f8]
+Change-Id: I3b8c3e2732e7efdd76ce63204ac53a48784d0df6
+Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+---
+ probes/Kbuild | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/probes/Kbuild b/probes/Kbuild
+index 3ae2d39e..58da82b8 100644
+--- a/probes/Kbuild
++++ b/probes/Kbuild
+@@ -215,8 +215,11 @@ ifneq ($(CONFIG_FRAME_WARN),0)
+ CFLAGS_lttng-probe-printk.o += -Wframe-larger-than=2200
+ endif
+
++# Introduced in v3.6, remove in v5.18
+ obj-$(CONFIG_LTTNG) += $(shell \
+- if [ $(VERSION) -ge 4 \
++ if [ \( ! \( $(VERSION) -ge 6 -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \) \) \
++ -a \
++ $(VERSION) -ge 4 \
+ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
+ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
+ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
+--
+2.35.1
+
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch b/meta/recipes-kernel/lttng/lttng-modules/0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch
new file mode 100644
index 0000000000..5c324a9bde
--- /dev/null
+++ b/meta/recipes-kernel/lttng/lttng-modules/0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch
@@ -0,0 +1,45 @@
+From da956d1444139883f5d01078d945078738ffade4 Mon Sep 17 00:00:00 2001
+From: He Zhe <zhe.he@windriver.com>
+Date: Thu, 2 Jun 2022 06:36:08 +0000
+Subject: [PATCH 18/19] fix: random: remove unused tracepoints (v5.10, v5.15)
+
+The following kernel commit has been back ported to v5.10.119 and v5.15.44.
+
+commit 14c174633f349cb41ea90c2c0aaddac157012f74
+Author: Jason A. Donenfeld <Jason@zx2c4.com>
+Date: Thu Feb 10 16:40:44 2022 +0100
+
+ random: remove unused tracepoints
+
+ These explicit tracepoints aren't really used and show sign of aging.
+ It's work to keep these up to date, and before I attempted to keep them
+ up to date, they weren't up to date, which indicates that they're not
+ really used. These days there are better ways of introspecting anyway.
+
+Upstream-Status: Backport [1901e0eb58795e850e8fdcb5e1c235e4397b470d]
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Change-Id: I0b7eb8aa78b5bd2039e20ae3e1da4c5eb9018789
+---
+ probes/Kbuild | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/probes/Kbuild b/probes/Kbuild
+index 58da82b8..87f2d681 100644
+--- a/probes/Kbuild
++++ b/probes/Kbuild
+@@ -217,7 +217,10 @@ endif
+
+ # Introduced in v3.6, remove in v5.18
+ obj-$(CONFIG_LTTNG) += $(shell \
+- if [ \( ! \( $(VERSION) -ge 6 -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \) \) \
++ if [ \( ! \( $(VERSION) -ge 6 \
++ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \
++ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 15 -a $(SUBLEVEL) -ge 44 \) \
++ -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 10 -a $(SUBLEVEL) -ge 119\) \) \) \
+ -a \
+ $(VERSION) -ge 4 \
+ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
+--
+2.35.1
+
diff --git a/meta/recipes-kernel/lttng/lttng-modules/0019-fix-random-tracepoints-removed-in-stable-kernels.patch b/meta/recipes-kernel/lttng/lttng-modules/0019-fix-random-tracepoints-removed-in-stable-kernels.patch
new file mode 100644
index 0000000000..73ba4d06bc
--- /dev/null
+++ b/meta/recipes-kernel/lttng/lttng-modules/0019-fix-random-tracepoints-removed-in-stable-kernels.patch
@@ -0,0 +1,51 @@
+From 2c98e0cd03eba0aa935796bc7413c51b5e4b055c Mon Sep 17 00:00:00 2001
+From: Michael Jeanson <mjeanson@efficios.com>
+Date: Tue, 31 May 2022 15:24:48 -0400
+Subject: [PATCH 19/19] fix: 'random' tracepoints removed in stable kernels
+
+The upstream commit 14c174633f349cb41ea90c2c0aaddac157012f74 removing
+the 'random' tracepoints is being backported to multiple stable kernel
+branches, I don't see how that qualifies as a fix but here we are.
+
+Use the presence of 'include/trace/events/random.h' in the kernel source
+tree instead of the rather tortuous version check to determine if we
+need to build 'lttng-probe-random.ko'.
+
+Upstream-Status: Backport [ed1149ef88fb62c365ac66cf62c58ac6abd8d7e8]
+Change-Id: I8f5f2f4c9e09c61127c49c7949b22dd3fab0460d
+Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+---
+ probes/Kbuild | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+diff --git a/probes/Kbuild b/probes/Kbuild
+index 87f2d681..f09d6b65 100644
+--- a/probes/Kbuild
++++ b/probes/Kbuild
+@@ -216,18 +216,10 @@ ifneq ($(CONFIG_FRAME_WARN),0)
+ endif
+
+ # Introduced in v3.6, remove in v5.18
+-obj-$(CONFIG_LTTNG) += $(shell \
+- if [ \( ! \( $(VERSION) -ge 6 \
+- -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -ge 18 \) \
+- -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 15 -a $(SUBLEVEL) -ge 44 \) \
+- -o \( $(VERSION) -eq 5 -a $(PATCHLEVEL) -eq 10 -a $(SUBLEVEL) -ge 119\) \) \) \
+- -a \
+- $(VERSION) -ge 4 \
+- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
+- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
+- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
+- -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 0 -a $(SUBLEVEL) -ge 41 \) ] ; then \
+- echo "lttng-probe-random.o" ; fi;)
++random_dep = $(srctree)/include/trace/events/random.h
++ifneq ($(wildcard $(random_dep)),)
++ obj-$(CONFIG_LTTNG) += lttng-probe-random.o
++endif
+
+ obj-$(CONFIG_LTTNG) += $(shell \
+ if [ $(VERSION) -ge 4 \
+--
+2.35.1
+
diff --git a/meta/recipes-kernel/lttng/lttng-modules/fix-jbd2-use-the-correct-print-format.patch b/meta/recipes-kernel/lttng/lttng-modules/fix-jbd2-use-the-correct-print-format.patch
new file mode 100644
index 0000000000..b4939188cc
--- /dev/null
+++ b/meta/recipes-kernel/lttng/lttng-modules/fix-jbd2-use-the-correct-print-format.patch
@@ -0,0 +1,147 @@
+fix: jbd2: use the correct print format
+See upstream commit :
+
+ commit d87a7b4c77a997d5388566dd511ca8e6b8e8a0a8
+ Author: Bixuan Cui <cuibixuan@linux.alibaba.com>
+ Date: Tue Oct 11 19:33:44 2022 +0800
+
+ jbd2: use the correct print format
+
+ The print format error was found when using ftrace event:
+ <...>-1406 [000] .... 23599442.895823: jbd2_end_commit: dev 252,8 transaction -1866216965 sync 0 head -1866217368
+ <...>-1406 [000] .... 23599442.896299: jbd2_start_commit: dev 252,8 transaction -1866216964 sync 0
+
+ Use the correct print format for transaction, head and tid.
+
+Change-Id: Ic053f0e0c1e24ebc75bae51d07696aaa5e1c0094
+Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+Upstream-Status: Backport
+Signed-off-by: Steve Sakoman <steve@sakoman.com>
+Note: combines three upstream commits:
+https://github.com/lttng/lttng-modules/commit/b28830a0dcdf95ec3e6b390b4d032667deaad0c0
+https://github.com/lttng/lttng-modules/commit/4fd2615b87b3cac0fd5bdc5fc82db05f6fcfdecf
+https://github.com/lttng/lttng-modules/commit/612c99eb24bf72f4d47d02025e92de8c35ece14e
+
+diff --git a/instrumentation/events/lttng-module/jbd2.h b/instrumentation/events/lttng-module/jbd2.h
+--- a/instrumentation/events/lttng-module/jbd2.h
++++ b/instrumentation/events/lttng-module/jbd2.h
+@@ -29,6 +29,25 @@ LTTNG_TRACEPOINT_EVENT(jbd2_checkpoint,
+ )
+ )
+
++#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,2,0) \
++ || LTTNG_KERNEL_RANGE(5,4,229, 5,5,0) \
++ || LTTNG_KERNEL_RANGE(5,10,163, 5,11,0) \
++ || LTTNG_KERNEL_RANGE(5,15,87, 5,16,0) \
++ || LTTNG_KERNEL_RANGE(6,0,18, 6,1,0) \
++ || LTTNG_KERNEL_RANGE(6,1,4, 6,2,0))
++LTTNG_TRACEPOINT_EVENT_CLASS(jbd2_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_FIELDS(
++ ctf_integer(dev_t, dev, journal->j_fs_dev->bd_dev)
++ ctf_integer(char, sync_commit, commit_transaction->t_synchronous_commit)
++ ctf_integer(tid_t, transaction, commit_transaction->t_tid)
++ )
++)
++#else
+ LTTNG_TRACEPOINT_EVENT_CLASS(jbd2_commit,
+
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+@@ -41,6 +60,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(jbd2_commit
+ ctf_integer(int, transaction, commit_transaction->t_tid)
+ )
+ )
++#endif
+
+ LTTNG_TRACEPOINT_EVENT_INSTANCE(jbd2_commit, jbd2_start_commit,
+
+@@ -79,6 +99,25 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(jbd2_com
+ )
+ #endif
+
++#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,2,0) \
++ || LTTNG_KERNEL_RANGE(5,4,229, 5,5,0) \
++ || LTTNG_KERNEL_RANGE(5,10,163, 5,11,0) \
++ || LTTNG_KERNEL_RANGE(5,15,87, 5,16,0) \
++ || LTTNG_KERNEL_RANGE(6,0,18, 6,1,0) \
++ || LTTNG_KERNEL_RANGE(6,1,4, 6,2,0))
++LTTNG_TRACEPOINT_EVENT(jbd2_end_commit,
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_FIELDS(
++ ctf_integer(dev_t, dev, journal->j_fs_dev->bd_dev)
++ ctf_integer(char, sync_commit, commit_transaction->t_synchronous_commit)
++ ctf_integer(tid_t, transaction, commit_transaction->t_tid)
++ ctf_integer(tid_t, head, journal->j_tail_sequence)
++ )
++)
++#else
+ LTTNG_TRACEPOINT_EVENT(jbd2_end_commit,
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+
+@@ -91,6 +130,7 @@ LTTNG_TRACEPOINT_EVENT(jbd2_end_commit,
+ ctf_integer(int, head, journal->j_tail_sequence)
+ )
+ )
++#endif
+
+ LTTNG_TRACEPOINT_EVENT(jbd2_submit_inode_data,
+ TP_PROTO(struct inode *inode),
+@@ -103,7 +143,48 @@ LTTNG_TRACEPOINT_EVENT(jbd2_submit_inode
+ )
+ )
+
+-#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(2,6,32))
++#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,2,0) \
++ || LTTNG_KERNEL_RANGE(5,4,229, 5,5,0) \
++ || LTTNG_KERNEL_RANGE(5,10,163, 5,11,0) \
++ || LTTNG_KERNEL_RANGE(5,15,87, 5,16,0) \
++ || LTTNG_KERNEL_RANGE(6,0,18, 6,1,0) \
++ || LTTNG_KERNEL_RANGE(6,1,4, 6,2,0))
++LTTNG_TRACEPOINT_EVENT(jbd2_run_stats,
++ TP_PROTO(dev_t dev, tid_t tid,
++ struct transaction_run_stats_s *stats),
++
++ TP_ARGS(dev, tid, stats),
++
++ TP_FIELDS(
++ ctf_integer(dev_t, dev, dev)
++ ctf_integer(tid_t, tid, tid)
++ ctf_integer(unsigned long, wait, stats->rs_wait)
++ ctf_integer(unsigned long, running, stats->rs_running)
++ ctf_integer(unsigned long, locked, stats->rs_locked)
++ ctf_integer(unsigned long, flushing, stats->rs_flushing)
++ ctf_integer(unsigned long, logging, stats->rs_logging)
++ ctf_integer(__u32, handle_count, stats->rs_handle_count)
++ ctf_integer(__u32, blocks, stats->rs_blocks)
++ ctf_integer(__u32, blocks_logged, stats->rs_blocks_logged)
++ )
++)
++
++LTTNG_TRACEPOINT_EVENT(jbd2_checkpoint_stats,
++ TP_PROTO(dev_t dev, tid_t tid,
++ struct transaction_chp_stats_s *stats),
++
++ TP_ARGS(dev, tid, stats),
++
++ TP_FIELDS(
++ ctf_integer(dev_t, dev, dev)
++ ctf_integer(tid_t, tid, tid)
++ ctf_integer(unsigned long, chp_time, stats->cs_chp_time)
++ ctf_integer(__u32, forced_to_close, stats->cs_forced_to_close)
++ ctf_integer(__u32, written, stats->cs_written)
++ ctf_integer(__u32, dropped, stats->cs_dropped)
++ )
++)
++#else
+ LTTNG_TRACEPOINT_EVENT(jbd2_run_stats,
+ TP_PROTO(dev_t dev, unsigned long tid,
+ struct transaction_run_stats_s *stats),
diff --git a/meta/recipes-kernel/lttng/lttng-modules_2.11.6.bb b/meta/recipes-kernel/lttng/lttng-modules_2.11.9.bb
index 3145f0298c..8e9c44241b 100644
--- a/meta/recipes-kernel/lttng/lttng-modules_2.11.6.bb
+++ b/meta/recipes-kernel/lttng/lttng-modules_2.11.9.bb
@@ -12,26 +12,14 @@ COMPATIBLE_HOST = '(x86_64|i.86|powerpc|aarch64|mips|nios2|arm|riscv).*-linux'
SRC_URI = "https://lttng.org/files/${BPN}/${BPN}-${PV}.tar.bz2 \
file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \
file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \
- file://0001-fix-strncpy-equals-destination-size-warning.patch \
- file://0002-fix-objtool-Rename-frame.h-objtool.h-v5.10.patch \
- file://0003-fix-btrfs-tracepoints-output-proper-root-owner-for-t.patch \
- file://0004-fix-btrfs-make-ordered-extent-tracepoint-take-btrfs_.patch \
- file://0005-fix-ext4-fast-commit-recovery-path-v5.10.patch \
- file://0006-fix-KVM-x86-Add-intr-vectoring-info-and-error-code-t.patch \
- file://0007-fix-kvm-x86-mmu-Add-TDP-MMU-PF-handler-v5.10.patch \
- file://0008-fix-KVM-x86-mmu-Return-unique-RET_PF_-values-if-the-.patch \
- file://0009-fix-tracepoint-Optimize-using-static_call-v5.10.patch \
- file://0010-fix-include-order-for-older-kernels.patch \
- file://0011-Add-release-maintainer-script.patch \
- file://0012-Improve-the-release-script.patch \
- file://0013-fix-backport-of-fix-ext4-fast-commit-recovery-path-v.patch \
- file://0014-Revert-fix-include-order-for-older-kernels.patch \
- file://0015-fix-backport-of-fix-tracepoint-Optimize-using-static.patch \
- file://0016-fix-adjust-version-range-for-trace_find_free_extent.patch \
+ file://0017-fix-random-remove-unused-tracepoints-v5.18.patch \
+ file://0018-fix-random-remove-unused-tracepoints-v5.10-v5.15.patch \
+ file://0019-fix-random-tracepoints-removed-in-stable-kernels.patch \
+ file://fix-jbd2-use-the-correct-print-format.patch \
"
-SRC_URI[md5sum] = "8ef09fdfcdec669d33f7fc1c1c80f2c4"
-SRC_URI[sha256sum] = "23372811cdcd2ac28ba8c9d09484ed5f9238cfbd0043f8c663ff3875ba9c8566"
+SRC_URI[md5sum] = "cfb23ea6bdaf1ad40c7f9ac098b4016d"
+SRC_URI[sha256sum] = "0c5fe9f8d8dbd1411a3c1c643dcbd0a55577bd15845758b73948e00bc7c387a6"
export INSTALL_MOD_DIR="kernel/lttng-modules"
diff --git a/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb b/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
index f9df345ca5..32b89bb5ea 100644
--- a/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
+++ b/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb
@@ -3,7 +3,7 @@ HOMEPAGE = "https://www.yoctoproject.org/"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
-inherit kernel-arch
+inherit kernel-arch linux-kernel-base
inherit pkgconfig
PACKAGE_ARCH = "${MACHINE_ARCH}"
diff --git a/meta/recipes-kernel/perf/perf.bb b/meta/recipes-kernel/perf/perf.bb
index 9c9bf1647f..42621e47d3 100644
--- a/meta/recipes-kernel/perf/perf.bb
+++ b/meta/recipes-kernel/perf/perf.bb
@@ -9,11 +9,11 @@ HOMEPAGE = "https://perf.wiki.kernel.org/index.php/Main_Page"
LICENSE = "GPLv2"
-PR = "r9"
+PR = "r10"
PACKAGECONFIG ??= "scripting tui libunwind"
PACKAGECONFIG[dwarf] = ",NO_DWARF=1"
-PACKAGECONFIG[scripting] = ",NO_LIBPERL=1 NO_LIBPYTHON=1,perl python3"
+PACKAGECONFIG[scripting] = ",NO_LIBPERL=1 NO_LIBPYTHON=1,perl python3 python3-setuptools-native"
# gui support was added with kernel 3.6.35
# since 3.10 libnewt was replaced by slang
# to cover a wide range of kernel we add both dependencies
diff --git a/meta/recipes-kernel/systemtap/systemtap/0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch b/meta/recipes-kernel/systemtap/systemtap/0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch
new file mode 100644
index 0000000000..f885c44460
--- /dev/null
+++ b/meta/recipes-kernel/systemtap/systemtap/0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch
@@ -0,0 +1,49 @@
+From f199d1982ef8a6c6d5c06c082d057b8793bcc6aa Mon Sep 17 00:00:00 2001
+From: Serhei Makarov <serhei@serhei.io>
+Date: Fri, 21 Jan 2022 18:21:46 -0500
+Subject: [PATCH] gcc12 c++ compatibility re-tweak for rhel6: use function
+ pointer instead of lambdas instead of ptr_fun<>
+
+Saving 2 lines in ltrim/rtrim is probably not a good reason to drop
+compatibility with the RHEL6 system compiler. Actually declaring a
+named function and passing the function pointer is compatible with
+everything.
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=systemtap.git;a=commit;h=f199d1982ef8a6c6d5c06c082d057b8793bcc6aa]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ util.cxx | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/util.cxx
++++ b/util.cxx
+@@ -1757,21 +1757,24 @@ flush_to_stream (const string &fname, os
+ return 1; // Failure
+ }
+
++int
++not_isspace(unsigned char c)
++{
++ return !std::isspace(c);
++}
++
+ // trim from start (in place)
+ void
+ ltrim(std::string &s)
+ {
+- s.erase(s.begin(),
+- std::find_if(s.begin(), s.end(),
+- std::not1(std::ptr_fun<int, int>(std::isspace))));
++ s.erase(s.begin(), std::find_if(s.begin(), s.end(), not_isspace));
+ }
+
+ // trim from end (in place)
+ void
+ rtrim(std::string &s)
+ {
+- s.erase(std::find_if(s.rbegin(), s.rend(),
+- std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
++ s.erase(std::find_if(s.rbegin(), s.rend(), not_isspace).base(), s.end());
+ }
+
+ // trim from both ends (in place)
diff --git a/meta/recipes-kernel/systemtap/systemtap_git.bb b/meta/recipes-kernel/systemtap/systemtap_git.bb
index bdd8fb83b0..a8b2cf1eac 100644
--- a/meta/recipes-kernel/systemtap/systemtap_git.bb
+++ b/meta/recipes-kernel/systemtap/systemtap_git.bb
@@ -6,7 +6,9 @@ HOMEPAGE = "https://sourceware.org/systemtap/"
require systemtap_git.inc
-SRC_URI += "file://0001-improve-reproducibility-for-c-compiling.patch"
+SRC_URI += "file://0001-improve-reproducibility-for-c-compiling.patch \
+ file://0001-gcc12-c-compatibility-re-tweak-for-rhel6-use-functio.patch \
+ "
DEPENDS = "elfutils"
diff --git a/meta/recipes-kernel/wireless-regdb/wireless-regdb_2022.04.08.bb b/meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb
index ad6ba8dc8b..6489bc90d9 100644
--- a/meta/recipes-kernel/wireless-regdb/wireless-regdb_2022.04.08.bb
+++ b/meta/recipes-kernel/wireless-regdb/wireless-regdb_2024.01.23.bb
@@ -5,7 +5,7 @@ LICENSE = "ISC"
LIC_FILES_CHKSUM = "file://LICENSE;md5=07c4f6dea3845b02a18dc00c8c87699c"
SRC_URI = "https://www.kernel.org/pub/software/network/${BPN}/${BP}.tar.xz"
-SRC_URI[sha256sum] = "884ba2e3c1e8b98762b6dc25ff60b5ec75c8d33a39e019b3ed4aa615491460d3"
+SRC_URI[sha256sum] = "c8a61c9acf76fa7eb4239e89f640dee3e87098d9f69b4d3518c9c60fc6d20c55"
inherit bin_package allarch
@@ -13,7 +13,7 @@ do_install() {
install -d -m0755 ${D}${nonarch_libdir}/crda
install -d -m0755 ${D}${sysconfdir}/wireless-regdb/pubkeys
install -m 0644 regulatory.bin ${D}${nonarch_libdir}/crda/regulatory.bin
- install -m 0644 sforshee.key.pub.pem ${D}${sysconfdir}/wireless-regdb/pubkeys/sforshee.key.pub.pem
+ install -m 0644 wens.key.pub.pem ${D}${sysconfdir}/wireless-regdb/pubkeys/wens.key.pub.pem
install -m 0644 -D regulatory.db ${D}${nonarch_base_libdir}/firmware/regulatory.db
install -m 0644 regulatory.db.p7s ${D}${nonarch_base_libdir}/firmware/regulatory.db.p7s
diff --git a/meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb b/meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb
index 659eea672f..8205982fcc 100644
--- a/meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb
+++ b/meta/recipes-multimedia/alsa/alsa-plugins_1.2.1.bb
@@ -36,7 +36,7 @@ PACKAGECONFIG ??= "\
speexdsp \
${@bb.utils.filter('DISTRO_FEATURES', 'pulseaudio', d)} \
"
-PACKAGECONFIG[aaf] = "--enable-aaf,--disable-aaf,avtp"
+PACKAGECONFIG[aaf] = "--enable-aaf,--disable-aaf,libavtp"
PACKAGECONFIG[jack] = "--enable-jack,--disable-jack,jack"
PACKAGECONFIG[libav] = "--enable-libav,--disable-libav,libav"
PACKAGECONFIG[maemo-plugin] = "--enable-maemo-plugin,--disable-maemo-plugin"
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-1475.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-1475.patch
new file mode 100644
index 0000000000..bd8a08a216
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-1475.patch
@@ -0,0 +1,36 @@
+From: Michael Niedermayer <michael@niedermayer.cc>
+Date: Sun, 27 Feb 2022 14:43:04 +0100
+Subject: [PATCH] avcodec/g729_parser: Check channels
+
+Fixes: signed integer overflow: 10 * 808464428 cannot be represented in type 'int'
+Fixes: assertion failure
+Fixes: ticket9651
+
+Reviewed-by: Paul B Mahol <onemda@gmail.com>
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+(cherry picked from commit 757da974b21833529cc41bdcc9684c29660cdfa8)
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+
+CVE: CVE-2022-1475
+Upstream-Status: Backport [https://git.videolan.org/?p=ffmpeg.git;a=commitdiff;h=e9e2ddbc6c78cc18b76093617f82c920e58a8d1f]
+Comment: Patch is refreshed as per ffmpeg codebase
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+
+---
+ libavcodec/g729_parser.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+Index: ffmpeg-4.2.2/libavcodec/g729_parser.c
+===================================================================
+--- a/libavcodec/g729_parser.c
++++ b/libavcodec/g729_parser.c
+@@ -48,6 +48,9 @@ static int g729_parse(AVCodecParserConte
+ av_assert1(avctx->codec_id == AV_CODEC_ID_G729);
+ /* FIXME: replace this heuristic block_size with more precise estimate */
+ s->block_size = (avctx->bit_rate < 8000) ? G729D_6K4_BLOCK_SIZE : G729_8K_BLOCK_SIZE;
++ // channels > 2 is invalid, we pass the packet on unchanged
++ if (avctx->channels > 2)
++ s->block_size = 0;
+ s->block_size *= avctx->channels;
+ s->duration = avctx->frame_size;
+ }
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3109.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3109.patch
new file mode 100644
index 0000000000..febf49cff2
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3109.patch
@@ -0,0 +1,41 @@
+From 656cb0450aeb73b25d7d26980af342b37ac4c568 Mon Sep 17 00:00:00 2001
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Date: Tue, 15 Feb 2022 17:58:08 +0800
+Subject: [PATCH] avcodec/vp3: Add missing check for av_malloc
+
+Since the av_malloc() may fail and return NULL pointer,
+it is needed that the 's->edge_emu_buffer' should be checked
+whether the new allocation is success.
+
+Fixes: d14723861b ("VP3: fix decoding of videos with stride > 2048")
+
+CVE: CVE-2022-3109
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/656cb0450aeb73b25d7d26980af342b37ac4c568]
+Comments: Refreshed hunk
+
+Reviewed-by: Peter Ross <pross@xvid.org>
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ libavcodec/vp3.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
+index e9ab54d73677..e2418eb6fa04 100644
+--- a/libavcodec/vp3.c
++++ b/libavcodec/vp3.c
+@@ -2740,8 +2740,13 @@
+ if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0)
+ goto error;
+
+- if (!s->edge_emu_buffer)
++ if (!s->edge_emu_buffer) {
+ s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
++ if (!s->edge_emu_buffer) {
++ ret = AVERROR(ENOMEM);
++ goto error;
++ }
++ }
+
+ if (s->keyframe) {
+ if (!s->theora) {
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3341.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3341.patch
new file mode 100644
index 0000000000..fcbd9b3e1b
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-3341.patch
@@ -0,0 +1,67 @@
+From 9cf652cef49d74afe3d454f27d49eb1a1394951e Mon Sep 17 00:00:00 2001
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Date: Wed, 23 Feb 2022 10:31:59 +0800
+Subject: [PATCH] avformat/nutdec: Add check for avformat_new_stream
+
+Check for failure of avformat_new_stream() and propagate
+the error code.
+
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+
+CVE: CVE-2022-3341
+
+Upstream-Status: Backport [https://github.com/FFmpeg/FFmpeg/commit/9cf652cef49d74afe3d454f27d49eb1a1394951e]
+
+Comments: Refreshed Hunk
+Signed-off-by: Narpat Mali <narpat.mali@windriver.com>
+Signed-off-by: Bhabu Bindu <bhabu.bindu@kpit.com>
+---
+ libavformat/nutdec.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c
+index 0a8a700acf..f9ad2c0af1 100644
+--- a/libavformat/nutdec.c
++++ b/libavformat/nutdec.c
+@@ -351,8 +351,12 @@ static int decode_main_header(NUTContext *nut)
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+- for (i = 0; i < stream_count; i++)
+- avformat_new_stream(s, NULL);
++ for (i = 0; i < stream_count; i++) {
++ if (!avformat_new_stream(s, NULL)) {
++ ret = AVERROR(ENOMEM);
++ goto fail;
++ }
++ }
+
+ return 0;
+ fail:
+@@ -793,19 +793,23 @@
+ NUTContext *nut = s->priv_data;
+ AVIOContext *bc = s->pb;
+ int64_t pos;
+- int initialized_stream_count;
++ int initialized_stream_count, ret;
+
+ nut->avf = s;
+
+ /* main header */
+ pos = 0;
++ ret = 0;
+ do {
++ if (ret == AVERROR(ENOMEM))
++ return ret;
++
+ pos = find_startcode(bc, MAIN_STARTCODE, pos) + 1;
+ if (pos < 0 + 1) {
+ av_log(s, AV_LOG_ERROR, "No main startcode found.\n");
+ goto fail;
+ }
+- } while (decode_main_header(nut) < 0);
++ } while ((ret = decode_main_header(nut)) < 0);
+
+ /* stream headers */
+ pos = 0;
+
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch
new file mode 100644
index 0000000000..707073709a
--- /dev/null
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg/CVE-2022-48434.patch
@@ -0,0 +1,136 @@
+From d4b7b3c03ee2baf0166ce49dff17ec9beff684db Mon Sep 17 00:00:00 2001
+From: Anton Khirnov <anton@khirnov.net>
+Date: Fri, 2 Sep 2022 22:21:27 +0200
+Subject: [PATCH] lavc/pthread_frame: avoid leaving stale hwaccel state in
+ worker threads
+
+This state is not refcounted, so make sure it always has a well-defined
+owner.
+
+Remove the block added in 091341f2ab5bd35ca1a2aae90503adc74f8d3523, as
+this commit also solves that issue in a more general way.
+
+(cherry picked from commit cc867f2c09d2b69cee8a0eccd62aff002cbbfe11)
+Signed-off-by: Anton Khirnov <anton@khirnov.net>
+(cherry picked from commit 35aa7e70e7ec350319e7634a30d8d8aa1e6ecdda)
+Signed-off-by: Anton Khirnov <anton@khirnov.net>
+(cherry picked from commit 3bc28e9d1ab33627cea3c632dd6b0c33e22e93ba)
+Signed-off-by: Anton Khirnov <anton@khirnov.net>
+
+CVE: CVE-2022-48434
+Upstream-Status: Backport [https://git.ffmpeg.org/gitweb/ffmpeg.git/commit/d4b7b3c03ee2baf0166ce49dff17ec9beff684db]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: Hunk#6 refreshed to backport changes and other to remove patch-fuzz warnings
+---
+ libavcodec/pthread_frame.c | 46 +++++++++++++++++++++++++++++---------
+ 1 file changed, 35 insertions(+), 11 deletions(-)
+
+diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c
+index 36ac0ac..bbc5ba6 100644
+--- a/libavcodec/pthread_frame.c
++++ b/libavcodec/pthread_frame.c
+@@ -135,6 +135,12 @@ typedef struct FrameThreadContext {
+ * Set for the first N packets, where N is the number of threads.
+ * While it is set, ff_thread_en/decode_frame won't return any results.
+ */
++
++ /* hwaccel state is temporarily stored here in order to transfer its ownership
++ * to the next decoding thread without the need for extra synchronization */
++ const AVHWAccel *stash_hwaccel;
++ void *stash_hwaccel_context;
++ void *stash_hwaccel_priv;
+ } FrameThreadContext;
+
+ #define THREAD_SAFE_CALLBACKS(avctx) \
+@@ -211,9 +217,17 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
+ ff_thread_finish_setup(avctx);
+
+ if (p->hwaccel_serializing) {
++ /* wipe hwaccel state to avoid stale pointers lying around;
++ * the state was transferred to FrameThreadContext in
++ * ff_thread_finish_setup(), so nothing is leaked */
++ avctx->hwaccel = NULL;
++ avctx->hwaccel_context = NULL;
++ avctx->internal->hwaccel_priv_data = NULL;
++
+ p->hwaccel_serializing = 0;
+ pthread_mutex_unlock(&p->parent->hwaccel_mutex);
+ }
++ av_assert0(!avctx->hwaccel);
+
+ if (p->async_serializing) {
+ p->async_serializing = 0;
+@@ -275,14 +289,10 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
+ dst->color_range = src->color_range;
+ dst->chroma_sample_location = src->chroma_sample_location;
+
+- dst->hwaccel = src->hwaccel;
+- dst->hwaccel_context = src->hwaccel_context;
+-
+ dst->channels = src->channels;
+ dst->sample_rate = src->sample_rate;
+ dst->sample_fmt = src->sample_fmt;
+ dst->channel_layout = src->channel_layout;
+- dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;
+
+ if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
+ (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
+@@ -415,6 +425,12 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
+ pthread_mutex_unlock(&p->mutex);
+ return err;
+ }
++
++ /* transfer hwaccel state stashed from previous thread, if any */
++ av_assert0(!p->avctx->hwaccel);
++ FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
++ FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
++ FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
+ }
+
+ av_packet_unref(&p->avpkt);
+@@ -616,6 +632,14 @@ void ff_thread_finish_setup(AVCodecContext *avctx) {
+ async_lock(p->parent);
+ }
+
++ /* save hwaccel state for passing to the next thread;
++ * this is done here so that this worker thread can wipe its own hwaccel
++ * state after decoding, without requiring synchronization */
++ av_assert0(!p->parent->stash_hwaccel);
++ p->parent->stash_hwaccel = avctx->hwaccel;
++ p->parent->stash_hwaccel_context = avctx->hwaccel_context;
++ p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data;
++
+ pthread_mutex_lock(&p->progress_mutex);
+ if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
+ av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
+@@ -657,13 +681,6 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
+
+ park_frame_worker_threads(fctx, thread_count);
+
+- if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
+- if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
+- av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
+- fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy;
+- fctx->threads->avctx->internal->is_copy = 1;
+- }
+-
+ for (i = 0; i < thread_count; i++) {
+ PerThreadContext *p = &fctx->threads[i];
+
+@@ -713,6 +730,13 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
+ pthread_mutex_destroy(&fctx->async_mutex);
+ pthread_cond_destroy(&fctx->async_cond);
+
++ /* if we have stashed hwaccel state, move it to the user-facing context,
++ * so it will be freed in avcodec_close() */
++ av_assert0(!avctx->hwaccel);
++ FFSWAP(const AVHWAccel*, avctx->hwaccel, fctx->stash_hwaccel);
++ FFSWAP(void*, avctx->hwaccel_context, fctx->stash_hwaccel_context);
++ FFSWAP(void*, avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
++
+ av_freep(&avctx->internal->thread_ctx);
+
+ if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb b/meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb
index 1d6f2e528b..f12052548f 100644
--- a/meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb
+++ b/meta/recipes-multimedia/ffmpeg/ffmpeg_4.2.2.bb
@@ -29,6 +29,10 @@ SRC_URI = "https://www.ffmpeg.org/releases/${BP}.tar.xz \
file://0001-libavutil-include-assembly-with-full-path-from-sourc.patch \
file://CVE-2021-3566.patch \
file://CVE-2021-38291.patch \
+ file://CVE-2022-1475.patch \
+ file://CVE-2022-3109.patch \
+ file://CVE-2022-3341.patch \
+ file://CVE-2022-48434.patch \
"
SRC_URI[md5sum] = "348956fc2faa57a2f79bbb84ded9fbc3"
SRC_URI[sha256sum] = "cb754255ab0ee2ea5f66f8850e1bd6ad5cac1cd855d0a2f4990fb8c668b0d29c"
diff --git a/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch b/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch
new file mode 100644
index 0000000000..e042872dc0
--- /dev/null
+++ b/meta/recipes-multimedia/flac/files/CVE-2020-22219.patch
@@ -0,0 +1,197 @@
+From 579ff6922089cbbbd179619e40e622e279bd719f Mon Sep 17 00:00:00 2001
+From: Martijn van Beurden <mvanb1@gmail.com>
+Date: Wed, 3 Aug 2022 13:52:19 +0200
+Subject: [PATCH] flac: Add and use _nofree variants of safe_realloc functions
+
+Parts of the code use realloc like
+
+x = safe_realloc(x, somesize);
+
+when this is the case, the safe_realloc variant used must free the
+old memory block in case it fails, otherwise it will leak. However,
+there are also instances in the code where handling is different:
+
+if (0 == (x = safe_realloc(y, somesize)))
+ return false
+
+in this case, y should not be freed, as y is not set to NULL we
+could encounter double frees. Here the safe_realloc_nofree
+functions are used.
+
+Upstream-Status: Backport [https://github.com/xiph/flac/commit/21fe95ee828b0b9b944f6aa0bb02d24fbb981815]
+CVE: CVE-2020-22219
+
+Signed-off-by: Meenali Gupta <meenali.gupta@windriver.com>
+---
+ include/share/alloc.h | 41 +++++++++++++++++++++++++++++++----
+ src/flac/encode.c | 4 ++--
+ src/flac/foreign_metadata.c | 2 +-
+ src/libFLAC/bitwriter.c | 2 +-
+ src/libFLAC/metadata_object.c | 2 +-
+ src/plugin_common/tags.c | 2 +-
+ src/share/utf8/iconvert.c | 2 +-
+ 7 files changed, 44 insertions(+), 11 deletions(-)
+
+diff --git a/include/share/alloc.h b/include/share/alloc.h
+index 914de9b..55bdd1d 100644
+--- a/include/share/alloc.h
++++ b/include/share/alloc.h
+@@ -161,17 +161,30 @@ static inline void *safe_realloc_(void *ptr, size_t size)
+ free(oldptr);
+ return newptr;
+ }
+-static inline void *safe_realloc_add_2op_(void *ptr, size_t size1, size_t size2)
++static inline void *safe_realloc_nofree_add_2op_(void *ptr, size_t size1, size_t size2)
++{
++ size2 += size1;
++ if(size2 < size1)
++ return 0;
++ return realloc(ptr, size2);
++}
++
++static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+ size2 += size1;
+ if(size2 < size1) {
+ free(ptr);
+ return 0;
+ }
+- return realloc(ptr, size2);
++ size3 += size2;
++ if(size3 < size2) {
++ free(ptr);
++ return 0;
++ }
++ return safe_realloc_(ptr, size3);
+ }
+
+-static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
++static inline void *safe_realloc_nofree_add_3op_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+ size2 += size1;
+ if(size2 < size1)
+@@ -182,7 +195,7 @@ static inline void *safe_realloc_add_3op_(void *ptr, size_t size1, size_t size2,
+ return realloc(ptr, size3);
+ }
+
+-static inline void *safe_realloc_add_4op_(void *ptr, size_t size1, size_t size2, size_t size3, size_t size4)
++static inline void *safe_realloc_nofree_add_4op_(void *ptr, size_t size1, size_t size2, size_t size3, size_t size4)
+ {
+ size2 += size1;
+ if(size2 < size1)
+@@ -205,6 +218,15 @@ static inline void *safe_realloc_mul_2op_(void *ptr, size_t size1, size_t size2)
+ return safe_realloc_(ptr, size1*size2);
+ }
+
++static inline void *safe_realloc_nofree_mul_2op_(void *ptr, size_t size1, size_t size2)
++{
++ if(!size1 || !size2)
++ return realloc(ptr, 0); /* preserve POSIX realloc(ptr, 0) semantics */
++ if(size1 > SIZE_MAX / size2)
++ return 0;
++ return realloc(ptr, size1*size2);
++}
++
+ /* size1 * (size2 + size3) */
+ static inline void *safe_realloc_muladd2_(void *ptr, size_t size1, size_t size2, size_t size3)
+ {
+@@ -216,4 +238,15 @@ static inline void *safe_realloc_muladd2_(void *ptr, size_t size1, size_t size2,
+ return safe_realloc_mul_2op_(ptr, size1, size2);
+ }
+
++/* size1 * (size2 + size3) */
++static inline void *safe_realloc_nofree_muladd2_(void *ptr, size_t size1, size_t size2, size_t size3)
++{
++ if(!size1 || (!size2 && !size3))
++ return realloc(ptr, 0); /* preserve POSIX realloc(ptr, 0) semantics */
++ size2 += size3;
++ if(size2 < size3)
++ return 0;
++ return safe_realloc_nofree_mul_2op_(ptr, size1, size2);
++}
++
+ #endif
+diff --git a/src/flac/encode.c b/src/flac/encode.c
+index a9b907f..f87250c 100644
+--- a/src/flac/encode.c
++++ b/src/flac/encode.c
+@@ -1743,10 +1743,10 @@ static void static_metadata_clear(static_metadata_t *m)
+ static FLAC__bool static_metadata_append(static_metadata_t *m, FLAC__StreamMetadata *d, FLAC__bool needs_delete)
+ {
+ void *x;
+- if(0 == (x = safe_realloc_muladd2_(m->metadata, sizeof(*m->metadata), /*times (*/m->num_metadata, /*+*/1/*)*/)))
++ if(0 == (x = safe_realloc_nofree_muladd2_(m->metadata, sizeof(*m->metadata), /*times (*/m->num_metadata, /*+*/1/*)*/)))
+ return false;
+ m->metadata = (FLAC__StreamMetadata**)x;
+- if(0 == (x = safe_realloc_muladd2_(m->needs_delete, sizeof(*m->needs_delete), /*times (*/m->num_metadata, /*+*/1/*)*/)))
++ if(0 == (x = safe_realloc_nofree_muladd2_(m->needs_delete, sizeof(*m->needs_delete), /*times (*/m->num_metadata, /*+*/1/*)*/)))
+ return false;
+ m->needs_delete = (FLAC__bool*)x;
+ m->metadata[m->num_metadata] = d;
+diff --git a/src/flac/foreign_metadata.c b/src/flac/foreign_metadata.c
+index 9ad9c18..fdfb3cf 100644
+--- a/src/flac/foreign_metadata.c
++++ b/src/flac/foreign_metadata.c
+@@ -75,7 +75,7 @@ static FLAC__bool copy_data_(FILE *fin, FILE *fout, size_t size, const char **er
+
+ static FLAC__bool append_block_(foreign_metadata_t *fm, FLAC__off_t offset, FLAC__uint32 size, const char **error)
+ {
+- foreign_block_t *fb = safe_realloc_muladd2_(fm->blocks, sizeof(foreign_block_t), /*times (*/fm->num_blocks, /*+*/1/*)*/);
++ foreign_block_t *fb = safe_realloc_nofree_muladd2_(fm->blocks, sizeof(foreign_block_t), /*times (*/fm->num_blocks, /*+*/1/*)*/);
+ if(fb) {
+ fb[fm->num_blocks].offset = offset;
+ fb[fm->num_blocks].size = size;
+diff --git a/src/libFLAC/bitwriter.c b/src/libFLAC/bitwriter.c
+index 6e86585..a510b0d 100644
+--- a/src/libFLAC/bitwriter.c
++++ b/src/libFLAC/bitwriter.c
+@@ -124,7 +124,7 @@ FLAC__bool bitwriter_grow_(FLAC__BitWriter *bw, uint32_t bits_to_add)
+ FLAC__ASSERT(new_capacity > bw->capacity);
+ FLAC__ASSERT(new_capacity >= bw->words + ((bw->bits + bits_to_add + FLAC__BITS_PER_WORD - 1) / FLAC__BITS_PER_WORD));
+
+- new_buffer = safe_realloc_mul_2op_(bw->buffer, sizeof(bwword), /*times*/new_capacity);
++ new_buffer = safe_realloc_nofree_mul_2op_(bw->buffer, sizeof(bwword), /*times*/new_capacity);
+ if(new_buffer == 0)
+ return false;
+ bw->buffer = new_buffer;
+diff --git a/src/libFLAC/metadata_object.c b/src/libFLAC/metadata_object.c
+index de8e513..aef65be 100644
+--- a/src/libFLAC/metadata_object.c
++++ b/src/libFLAC/metadata_object.c
+@@ -98,7 +98,7 @@ static FLAC__bool free_copy_bytes_(FLAC__byte **to, const FLAC__byte *from, uint
+ /* realloc() failure leaves entry unchanged */
+ static FLAC__bool ensure_null_terminated_(FLAC__byte **entry, uint32_t length)
+ {
+- FLAC__byte *x = safe_realloc_add_2op_(*entry, length, /*+*/1);
++ FLAC__byte *x = safe_realloc_nofree_add_2op_(*entry, length, /*+*/1);
+ if (x != NULL) {
+ x[length] = '\0';
+ *entry = x;
+diff --git a/src/plugin_common/tags.c b/src/plugin_common/tags.c
+index ae440c5..dfa10d3 100644
+--- a/src/plugin_common/tags.c
++++ b/src/plugin_common/tags.c
+@@ -317,7 +317,7 @@ FLAC__bool FLAC_plugin__tags_add_tag_utf8(FLAC__StreamMetadata *tags, const char
+ const size_t value_len = strlen(value);
+ const size_t separator_len = strlen(separator);
+ FLAC__byte *new_entry;
+- if(0 == (new_entry = safe_realloc_add_4op_(entry->entry, entry->length, /*+*/value_len, /*+*/separator_len, /*+*/1)))
++ if(0 == (new_entry = safe_realloc_nofree_add_4op_(entry->entry, entry->length, /*+*/value_len, /*+*/separator_len, /*+*/1)))
+ return false;
+ memcpy(new_entry+entry->length, separator, separator_len);
+ entry->length += separator_len;
+diff --git a/src/share/utf8/iconvert.c b/src/share/utf8/iconvert.c
+index 8ab53c1..876c06e 100644
+--- a/src/share/utf8/iconvert.c
++++ b/src/share/utf8/iconvert.c
+@@ -149,7 +149,7 @@ int iconvert(const char *fromcode, const char *tocode,
+ iconv_close(cd1);
+ return ret;
+ }
+- newbuf = safe_realloc_add_2op_(utfbuf, (ob - utfbuf), /*+*/1);
++ newbuf = safe_realloc_nofree_add_2op_(utfbuf, (ob - utfbuf), /*+*/1);
+ if (!newbuf)
+ goto fail;
+ ob = (ob - utfbuf) + newbuf;
+--
+2.40.0
diff --git a/meta/recipes-multimedia/flac/files/CVE-2021-0561.patch b/meta/recipes-multimedia/flac/files/CVE-2021-0561.patch
new file mode 100644
index 0000000000..e19833a5ad
--- /dev/null
+++ b/meta/recipes-multimedia/flac/files/CVE-2021-0561.patch
@@ -0,0 +1,34 @@
+From e1575e4a7c5157cbf4e4a16dbd39b74f7174c7be Mon Sep 17 00:00:00 2001
+From: Neelkamal Semwal <neelkamal.semwal@ittiam.com>
+Date: Fri, 18 Dec 2020 22:28:36 +0530
+Subject: [PATCH] libFlac: Exit at EOS in verify mode
+
+When verify mode is enabled, once decoder flags end of stream,
+encode processing is considered complete.
+
+CVE-2021-0561
+
+Signed-off-by: Ralph Giles <giles@thaumas.net>
+
+Upstream-Status: Backport [https://github.com/xiph/flac/commit/e1575e4a7c5157cbf4e4a16dbd39b74f7174c7be]
+CVE: CVE-2021-0561
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/libFLAC/stream_encoder.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/libFLAC/stream_encoder.c b/src/libFLAC/stream_encoder.c
+index 4c91247fe8..7109802c27 100644
+--- a/src/libFLAC/stream_encoder.c
++++ b/src/libFLAC/stream_encoder.c
+@@ -2610,7 +2610,9 @@ FLAC__bool write_bitbuffer_(FLAC__StreamEncoder *encoder, uint32_t samples, FLAC
+ encoder->private_->verify.needs_magic_hack = true;
+ }
+ else {
+- if(!FLAC__stream_decoder_process_single(encoder->private_->verify.decoder)) {
++ if(!FLAC__stream_decoder_process_single(encoder->private_->verify.decoder)
++ || (!is_last_block
++ && (FLAC__stream_encoder_get_verify_decoder_state(encoder) == FLAC__STREAM_DECODER_END_OF_STREAM))) {
+ FLAC__bitwriter_release_buffer(encoder->private_->frame);
+ FLAC__bitwriter_clear(encoder->private_->frame);
+ if(encoder->protected_->state != FLAC__STREAM_ENCODER_VERIFY_MISMATCH_IN_AUDIO_DATA)
diff --git a/meta/recipes-multimedia/flac/flac_1.3.3.bb b/meta/recipes-multimedia/flac/flac_1.3.3.bb
index cb6692aedf..e593727ac8 100644
--- a/meta/recipes-multimedia/flac/flac_1.3.3.bb
+++ b/meta/recipes-multimedia/flac/flac_1.3.3.bb
@@ -15,6 +15,8 @@ LIC_FILES_CHKSUM = "file://COPYING.FDL;md5=ad1419ecc56e060eccf8184a87c4285f \
DEPENDS = "libogg"
SRC_URI = "http://downloads.xiph.org/releases/flac/${BP}.tar.xz \
+ file://CVE-2020-22219.patch \
+ file://CVE-2021-0561.patch \
"
SRC_URI[md5sum] = "26703ed2858c1fc9ffc05136d13daa69"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1920.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1920.patch
new file mode 100644
index 0000000000..ee33c5564d
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1920.patch
@@ -0,0 +1,59 @@
+From cf887f1b8e228bff6e19829e6d03995d70ad739d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Wed, 18 May 2022 10:23:15 +0300
+Subject: [PATCH] matroskademux: Avoid integer-overflow resulting in heap
+ corruption in WavPack header handling code
+
+blocksize + WAVPACK4_HEADER_SIZE might overflow gsize, which then
+results in allocating a very small buffer. Into that buffer blocksize
+data is memcpy'd later which then causes out of bound writes and can
+potentially lead to anything from crashes to remote code execution.
+
+Thanks to Adam Doupe for analyzing and reporting the issue.
+
+CVE: CVE-2022-1920
+
+https://gstreamer.freedesktop.org/security/sa-2022-0004.html
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/1226
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2612>
+
+https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/0df0dd7fe388174e4835eda4526b47f470a56370
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ .../gst/matroska/matroska-demux.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/gst/matroska/matroska-demux.c b/gst/matroska/matroska-demux.c
+index 64cc6be60be..01d754c3eb9 100644
+--- a/gst/matroska/matroska-demux.c
++++ b/gst/matroska/matroska-demux.c
+@@ -3933,7 +3933,8 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ } else {
+ guint8 *outdata = NULL;
+ gsize buf_size, size;
+- guint32 block_samples, flags, crc, blocksize;
++ guint32 block_samples, flags, crc;
++ gsize blocksize;
+ GstAdapter *adapter;
+
+ adapter = gst_adapter_new ();
+@@ -3974,6 +3975,13 @@ gst_matroska_demux_add_wvpk_header (GstElement * element,
+ return GST_FLOW_ERROR;
+ }
+
++ if (blocksize > G_MAXSIZE - WAVPACK4_HEADER_SIZE) {
++ GST_ERROR_OBJECT (element, "Too big wavpack buffer");
++ gst_buffer_unmap (*buf, &map);
++ g_object_unref (adapter);
++ return GST_FLOW_ERROR;
++ }
++
+ g_assert (newbuf == NULL);
+
+ newbuf =
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1921.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1921.patch
new file mode 100644
index 0000000000..99dbb2b1b0
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1921.patch
@@ -0,0 +1,69 @@
+From f503caad676971933dc0b52c4b313e5ef0d6dbb0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Wed, 18 May 2022 12:00:48 +0300
+Subject: [PATCH] avidemux: Fix integer overflow resulting in heap corruption
+ in DIB buffer inversion code
+
+Check that width*bpp/8 doesn't overflow a guint and also that
+height*stride fits into the provided buffer without overflowing.
+
+Thanks to Adam Doupe for analyzing and reporting the issue.
+
+CVE: CVE-2022-1921
+
+See https://gstreamer.freedesktop.org/security/sa-2022-0001.html
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/1224
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2608>
+
+https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/f503caad676971933dc0b52c4b313e5ef0d6dbb0
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ .../gst/avi/gstavidemux.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/gst/avi/gstavidemux.c b/gst/avi/gstavidemux.c
+index eafe865494c..0d18a6495c7 100644
+--- a/gst/avi/gstavidemux.c
++++ b/gst/avi/gstavidemux.c
+@@ -4973,8 +4973,8 @@ swap_line (guint8 * d1, guint8 * d2, guint8 * tmp, gint bytes)
+ static GstBuffer *
+ gst_avi_demux_invert (GstAviStream * stream, GstBuffer * buf)
+ {
+- gint y, w, h;
+- gint bpp, stride;
++ guint y, w, h;
++ guint bpp, stride;
+ guint8 *tmp = NULL;
+ GstMapInfo map;
+ guint32 fourcc;
+@@ -5001,12 +5001,23 @@ gst_avi_demux_invert (GstAviStream * stream, GstBuffer * buf)
+ h = stream->strf.vids->height;
+ w = stream->strf.vids->width;
+ bpp = stream->strf.vids->bit_cnt ? stream->strf.vids->bit_cnt : 8;
++
++ if ((guint64) w * ((guint64) bpp / 8) > G_MAXUINT - 4) {
++ GST_WARNING ("Width x stride overflows");
++ return buf;
++ }
++
++ if (w == 0 || h == 0) {
++ GST_WARNING ("Zero width or height");
++ return buf;
++ }
++
+ stride = GST_ROUND_UP_4 (w * (bpp / 8));
+
+ buf = gst_buffer_make_writable (buf);
+
+ gst_buffer_map (buf, &map, GST_MAP_READWRITE);
+- if (map.size < (stride * h)) {
++ if (map.size < ((guint64) stride * (guint64) h)) {
+ GST_WARNING ("Buffer is smaller than reported Width x Height x Depth");
+ gst_buffer_unmap (buf, &map);
+ return buf;
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1922-1923-1924-1925.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1922-1923-1924-1925.patch
new file mode 100644
index 0000000000..ebffbc473d
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-1922-1923-1924-1925.patch
@@ -0,0 +1,214 @@
+From ad6012159acf18c6b5c0f4edf037e8c9a2dbc966 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Wed, 18 May 2022 11:24:37 +0300
+Subject: [PATCH] matroskademux: Fix integer overflows in zlib/bz2/etc
+ decompression code
+
+Various variables were of smaller types than needed and there were no
+checks for any overflows when doing additions on the sizes. This is all
+checked now.
+
+In addition the size of the decompressed data is limited to 120MB now as
+any larger sizes are likely pathological and we can avoid out of memory
+situations in many cases like this.
+
+Also fix a bug where the available output size on the next iteration in
+the zlib/bz2 decompression code was provided too large and could
+potentially lead to out of bound writes.
+
+Thanks to Adam Doupe for analyzing and reporting the issue.
+
+CVE: CVE-2022-1922, CVE-2022-1923, CVE-2022-1924, CVE-2022-1925
+
+https://gstreamer.freedesktop.org/security/sa-2022-0002.html
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/1225
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2610>
+
+CVE: CVE-2022-1922 CVE-2022-1923 CVE-2022-1924 CVE-2022-1925
+https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/ad6012159acf18c6b5c0f4edf037e8c9a2dbc966
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ .../gst/matroska/matroska-read-common.c | 76 +++++++++++++++----
+ 1 file changed, 61 insertions(+), 15 deletions(-)
+
+diff --git a/gst/matroska/matroska-read-common.c b/gst/matroska/matroska-read-common.c
+index eb317644cc5..6fadbba9567 100644
+--- a/gst/matroska/matroska-read-common.c
++++ b/gst/matroska/matroska-read-common.c
+@@ -70,6 +70,10 @@ typedef struct
+ gboolean audio_only;
+ } TargetTypeContext;
+
++/* 120MB as maximum decompressed data size. Anything bigger is likely
++ * pathological, and like this we avoid out of memory situations in many cases
++ */
++#define MAX_DECOMPRESS_SIZE (120 * 1024 * 1024)
+
+ static gboolean
+ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+@@ -77,19 +81,23 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ GstMatroskaTrackCompressionAlgorithm algo)
+ {
+ guint8 *new_data = NULL;
+- guint new_size = 0;
++ gsize new_size = 0;
+ guint8 *data = *data_out;
+- guint size = *size_out;
++ const gsize size = *size_out;
+ gboolean ret = TRUE;
+
++ if (size > G_MAXUINT32) {
++ GST_WARNING ("too large compressed data buffer.");
++ ret = FALSE;
++ goto out;
++ }
++
+ if (algo == GST_MATROSKA_TRACK_COMPRESSION_ALGORITHM_ZLIB) {
+ #ifdef HAVE_ZLIB
+ /* zlib encoded data */
+ z_stream zstream;
+- guint orig_size;
+ int result;
+
+- orig_size = size;
+ zstream.zalloc = (alloc_func) 0;
+ zstream.zfree = (free_func) 0;
+ zstream.opaque = (voidpf) 0;
+@@ -99,8 +107,8 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ goto out;
+ }
+ zstream.next_in = (Bytef *) data;
+- zstream.avail_in = orig_size;
+- new_size = orig_size;
++ zstream.avail_in = size;
++ new_size = size;
+ new_data = g_malloc (new_size);
+ zstream.avail_out = new_size;
+ zstream.next_out = (Bytef *) new_data;
+@@ -114,10 +122,18 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ break;
+ }
+
++ if (new_size > G_MAXSIZE - 4096 || new_size + 4096 > MAX_DECOMPRESS_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ result = Z_MEM_ERROR;
++ break;
++ }
++
+ new_size += 4096;
+ new_data = g_realloc (new_data, new_size);
+ zstream.next_out = (Bytef *) (new_data + zstream.total_out);
+- zstream.avail_out += 4096;
++ /* avail_out is an unsigned int */
++ g_assert (new_size - zstream.total_out <= G_MAXUINT);
++ zstream.avail_out = new_size - zstream.total_out;
+ } while (zstream.avail_in > 0);
+
+ if (result != Z_STREAM_END) {
+@@ -137,13 +153,11 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ #ifdef HAVE_BZ2
+ /* bzip2 encoded data */
+ bz_stream bzstream;
+- guint orig_size;
+ int result;
+
+ bzstream.bzalloc = NULL;
+ bzstream.bzfree = NULL;
+ bzstream.opaque = NULL;
+- orig_size = size;
+
+ if (BZ2_bzDecompressInit (&bzstream, 0, 0) != BZ_OK) {
+ GST_WARNING ("bzip2 initialization failed.");
+@@ -152,8 +166,8 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ }
+
+ bzstream.next_in = (char *) data;
+- bzstream.avail_in = orig_size;
+- new_size = orig_size;
++ bzstream.avail_in = size;
++ new_size = size;
+ new_data = g_malloc (new_size);
+ bzstream.avail_out = new_size;
+ bzstream.next_out = (char *) new_data;
+@@ -167,17 +181,31 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ break;
+ }
+
++ if (new_size > G_MAXSIZE - 4096 || new_size + 4096 > MAX_DECOMPRESS_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ result = BZ_MEM_ERROR;
++ break;
++ }
++
+ new_size += 4096;
+ new_data = g_realloc (new_data, new_size);
+- bzstream.next_out = (char *) (new_data + bzstream.total_out_lo32);
+- bzstream.avail_out += 4096;
++ bzstream.next_out =
++ (char *) (new_data + ((guint64) bzstream.total_out_hi32 << 32) +
++ bzstream.total_out_lo32);
++ /* avail_out is an unsigned int */
++ g_assert (new_size - ((guint64) bzstream.total_out_hi32 << 32) +
++ bzstream.total_out_lo32 <= G_MAXUINT);
++ bzstream.avail_out =
++ new_size - ((guint64) bzstream.total_out_hi32 << 32) +
++ bzstream.total_out_lo32;
+ } while (bzstream.avail_in > 0);
+
+ if (result != BZ_STREAM_END) {
+ ret = FALSE;
+ g_free (new_data);
+ } else {
+- new_size = bzstream.total_out_lo32;
++ new_size =
++ ((guint64) bzstream.total_out_hi32 << 32) + bzstream.total_out_lo32;
+ }
+ BZ2_bzDecompressEnd (&bzstream);
+
+@@ -189,7 +217,13 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ } else if (algo == GST_MATROSKA_TRACK_COMPRESSION_ALGORITHM_LZO1X) {
+ /* lzo encoded data */
+ int result;
+- int orig_size, out_size;
++ gint orig_size, out_size;
++
++ if (size > G_MAXINT) {
++ GST_WARNING ("too large compressed data buffer.");
++ ret = FALSE;
++ goto out;
++ }
+
+ orig_size = size;
+ out_size = size;
+@@ -203,6 +237,11 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ result = lzo1x_decode (new_data, &out_size, data, &orig_size);
+
+ if (orig_size > 0) {
++ if (new_size > G_MAXINT - 4096 || new_size + 4096 > MAX_DECOMPRESS_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ result = LZO_ERROR;
++ break;
++ }
+ new_size += 4096;
+ new_data = g_realloc (new_data, new_size);
+ }
+@@ -221,6 +260,13 @@ gst_matroska_decompress_data (GstMatroskaTrackEncoding * enc,
+ } else if (algo == GST_MATROSKA_TRACK_COMPRESSION_ALGORITHM_HEADERSTRIP) {
+ /* header stripped encoded data */
+ if (enc->comp_settings_length > 0) {
++ if (size > G_MAXSIZE - enc->comp_settings_length
++ || size + enc->comp_settings_length > MAX_DECOMPRESS_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ ret = FALSE;
++ goto out;
++ }
++
+ new_data = g_malloc (size + enc->comp_settings_length);
+ new_size = size + enc->comp_settings_length;
+
+--
+GitLab
+
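Every codec branch in the matroskademux change follows the same pattern: grow the output buffer in fixed 4096-byte steps, refuse to grow past a hard cap, and recompute the free output space from the stream's running total instead of blindly adding the step size. A hedged sketch of that single growth step against zlib, with a hypothetical helper name and the 120MB cap used in the patch:

#include <stdint.h>
#include <stdlib.h>
#include <zlib.h>

#define MAX_DECOMPRESS_SIZE (120u * 1024 * 1024)
#define GROW_STEP 4096u

/* Grow the output buffer by one step and re-point the zlib stream at
 * the unused tail.  Returns 0 on overflow, cap exceeded, or allocation
 * failure; returns 1 on success. */
static int grow_output(z_stream *zs, uint8_t **buf, size_t *buf_size)
{
    if (*buf_size > SIZE_MAX - GROW_STEP ||
        *buf_size + GROW_STEP > MAX_DECOMPRESS_SIZE)
        return 0;                 /* likely pathological input */

    size_t new_size = *buf_size + GROW_STEP;
    uint8_t *p = realloc(*buf, new_size);
    if (p == NULL)
        return 0;                 /* caller still owns the old buffer */

    *buf = p;
    *buf_size = new_size;

    /* avail_out is the free space that remains, not "+= GROW_STEP":
     * total_out already covers everything written so far. */
    zs->next_out  = p + zs->total_out;
    zs->avail_out = (uInt)(new_size - zs->total_out);
    return 1;
}
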
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-2122.patch b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-2122.patch
new file mode 100644
index 0000000000..f4d38c270e
--- /dev/null
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good/CVE-2022-2122.patch
@@ -0,0 +1,60 @@
+From 14d306da6da51a762c4dc701d161bb52ab66d774 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= <sebastian@centricular.com>
+Date: Mon, 30 May 2022 10:15:37 +0300
+Subject: [PATCH] qtdemux: Fix integer overflows in zlib decompression code
+
+Various variables were of smaller types than needed and there were no
+checks for any overflows when doing additions on the sizes. This is all
+checked now.
+
+In addition the size of the decompressed data is limited to 200MB now as
+any larger sizes are likely pathological and we can avoid out of memory
+situations in many cases like this.
+
+Also fix a bug where the available output size on the next iteration in
+the zlib decompression code was provided too large and could
+potentially lead to out of bound writes.
+
+Thanks to Adam Doupe for analyzing and reporting the issue.
+
+CVE: tbd
+
+https://gstreamer.freedesktop.org/security/sa-2022-0003.html
+
+Fixes https://gitlab.freedesktop.org/gstreamer/gstreamer/-/issues/1225
+
+Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/2610>
+
+https://gitlab.freedesktop.org/gstreamer/gstreamer/-/commit/14d306da6da51a762c4dc701d161bb52ab66d774
+CVE: CVE-2022-2122
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ gst/isomp4/qtdemux.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/gst/isomp4/qtdemux.c b/gst/isomp4/qtdemux.c
+index 7cc346b1e63..97ba0799a8d 100644
+--- a/gst/isomp4/qtdemux.c
++++ b/gst/isomp4/qtdemux.c
+@@ -7905,10 +7905,16 @@ qtdemux_inflate (void *z_buffer, guint z_length, guint * length)
+ break;
+ }
+
++ if (*length > G_MAXUINT - 4096 || *length > QTDEMUX_MAX_SAMPLE_INDEX_SIZE) {
++ GST_WARNING ("too big decompressed data");
++ ret = Z_MEM_ERROR;
++ break;
++ }
++
+ *length += 4096;
+ buffer = (guint8 *) g_realloc (buffer, *length);
+ z.next_out = (Bytef *) (buffer + z.total_out);
+- z.avail_out += 4096;
++ z.avail_out += *length - z.total_out;
+ } while (z.avail_in > 0);
+
+ if (ret != Z_STREAM_END) {
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb
index 1038cbf224..831a317a82 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0-plugins-good_1.16.3.bb
@@ -10,6 +10,10 @@ SRC_URI = " \
file://0001-qt-include-ext-qt-gstqtgl.h-instead-of-gst-gl-gstglf.patch \
file://CVE-2021-3497.patch \
file://CVE-2021-3498.patch \
+ file://CVE-2022-1920.patch \
+ file://CVE-2022-1921.patch \
+ file://CVE-2022-1922-1923-1924-1925.patch \
+ file://CVE-2022-2122.patch \
"
SRC_URI[md5sum] = "c79b6c2f8eaadb2bb66615b694db399e"
diff --git a/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb b/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb
index 236d6034d6..14793b7fdf 100644
--- a/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb
+++ b/meta/recipes-multimedia/gstreamer/gstreamer1.0_1.16.3.bb
@@ -41,7 +41,7 @@ PACKAGECONFIG[unwind] = "-Dlibunwind=enabled,-Dlibunwind=disabled,libunwind"
PACKAGECONFIG[dw] = "-Dlibdw=enabled,-Dlibdw=disabled,elfutils"
PACKAGECONFIG[bash-completion] = "-Dbash-completion=enabled,-Dbash-completion=disabled,bash-completion"
PACKAGECONFIG[tools] = "-Dtools=enabled,-Dtools=disabled"
-PACKAGECONFIG[setcap] = ",,libcap libcap-native"
+PACKAGECONFIG[setcap] = "-Dptp-helper-permissions=capabilities,,libcap libcap-native"
# TODO: put this in a gettext.bbclass patch
def gettext_oemeson(d):
@@ -83,5 +83,12 @@ CVE_CHECK_WHITELIST += "CVE-2021-3522"
# so we need to ignore the false hits
CVE_CHECK_WHITELIST += "CVE-2021-3497"
CVE_CHECK_WHITELIST += "CVE-2021-3498"
+CVE_CHECK_WHITELIST += "CVE-2022-1920"
+CVE_CHECK_WHITELIST += "CVE-2022-1921"
+CVE_CHECK_WHITELIST += "CVE-2022-1922"
+CVE_CHECK_WHITELIST += "CVE-2022-1923"
+CVE_CHECK_WHITELIST += "CVE-2022-1924"
+CVE_CHECK_WHITELIST += "CVE-2022-1925"
+CVE_CHECK_WHITELIST += "CVE-2022-2122"
require gstreamer1.0-ptest.inc
diff --git a/meta/recipes-multimedia/libpng/files/run-ptest b/meta/recipes-multimedia/libpng/files/run-ptest
new file mode 100644
index 0000000000..9ab5d0c1f4
--- /dev/null
+++ b/meta/recipes-multimedia/libpng/files/run-ptest
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+set -eux
+
+./pngfix pngtest.png > log.txt 2>&1
+
+if grep -i "OK" log.txt 2>&1 ; then
+ echo "PASS: pngfix passed"
+else
+ echo "FAIL: pngfix failed"
+fi
+rm -f log.txt
+
+./pngtest pngtest.png > log.txt 2>&1
+
+if grep -i "PASS" log.txt 2>&1 ; then
+ echo "PASS: pngtest passed"
+else
+ echo "FAIL: pngtest failed"
+fi
+rm -f log.txt
+
+for i in pngstest timepng; do
+ if "./${i}" pngtest.png 2>&1; then
+ echo "PASS: $i"
+ else
+ echo "FAIL: $i"
+ fi
+done
diff --git a/meta/recipes-multimedia/libpng/libpng_1.6.37.bb b/meta/recipes-multimedia/libpng/libpng_1.6.37.bb
index 3c46fa3302..9387fc8e2e 100644
--- a/meta/recipes-multimedia/libpng/libpng_1.6.37.bb
+++ b/meta/recipes-multimedia/libpng/libpng_1.6.37.bb
@@ -10,7 +10,10 @@ DEPENDS = "zlib"
LIBV = "16"
-SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}${LIBV}/${BP}.tar.xz"
+SRC_URI = "\
+ ${SOURCEFORGE_MIRROR}/${BPN}/${BPN}${LIBV}/${BP}.tar.xz \
+ file://run-ptest \
+ "
SRC_URI[md5sum] = "015e8e15db1eecde5f2eb9eb5b6e59e9"
SRC_URI[sha256sum] = "505e70834d35383537b6491e7ae8641f1a4bed1876dbfe361201fc80868d88ca"
@@ -20,7 +23,7 @@ UPSTREAM_CHECK_URI = "http://libpng.org/pub/png/libpng.html"
BINCONFIG = "${bindir}/libpng-config ${bindir}/libpng16-config"
-inherit autotools binconfig-disabled pkgconfig
+inherit autotools binconfig-disabled pkgconfig ptest
# Work around missing symbols
EXTRA_OECONF_append_class-target = " ${@bb.utils.contains("TUNE_FEATURES", "neon", "--enable-arm-neon=on", "--enable-arm-neon=off" ,d)}"
@@ -33,3 +36,11 @@ BBCLASSEXTEND = "native nativesdk"
# CVE-2019-17371 is actually a memory leak in gif2png 2.x
CVE_CHECK_WHITELIST += "CVE-2019-17371"
+
+do_install_ptest() {
+ install -m644 "${S}/pngtest.png" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngfix" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngtest" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/pngstest" "${D}${PTEST_PATH}"
+ install -m755 "${B}/.libs/timepng" "${D}${PTEST_PATH}"
+}
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-4156.patch b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-4156.patch
new file mode 100644
index 0000000000..f7ae82588f
--- /dev/null
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2021-4156.patch
@@ -0,0 +1,30 @@
+From ced91d7b971be6173b604154c39279ce90ad87cc Mon Sep 17 00:00:00 2001
+From: yuan <ssspeed00@gmail.com>
+Date: Tue, 20 Apr 2021 16:16:32 +0800
+Subject: [PATCH] flac: Fix improper buffer reusing (#732)
+
+Upstream-Status: Backport [https://github.com/libsndfile/libsndfile/commit/ced91d7b971be6173b604154c39279ce90ad87cc]
+CVE: CVE-2021-4156
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/flac.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/src/flac.c b/src/flac.c
+index 0be82ac..4fa5cfa 100644
+--- a/src/flac.c
++++ b/src/flac.c
+@@ -952,7 +952,11 @@ flac_read_loop (SF_PRIVATE *psf, unsigned len)
+ /* Decode some more. */
+ while (pflac->pos < pflac->len)
+ { if (FLAC__stream_decoder_process_single (pflac->fsd) == 0)
++ { psf_log_printf (psf, "FLAC__stream_decoder_process_single returned false\n") ;
++ /* Current frame is busted, so NULL the pointer. */
++ pflac->frame = NULL ;
+ break ;
++ } ;
+ state = FLAC__stream_decoder_get_state (pflac->fsd) ;
+ if (state >= FLAC__STREAM_DECODER_END_OF_STREAM)
+ { psf_log_printf (psf, "FLAC__stream_decoder_get_state returned %s\n", FLAC__StreamDecoderStateString [state]) ;
+--
+2.40.1
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch
new file mode 100644
index 0000000000..e22b4e9389
--- /dev/null
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2022-33065.patch
@@ -0,0 +1,46 @@
+From 0754562e13d2e63a248a1c82f90b30bc0ffe307c Mon Sep 17 00:00:00 2001
+From: Alex Stewart <alex.stewart@ni.com>
+Date: Tue, 10 Oct 2023 16:10:34 -0400
+Subject: [PATCH] mat4/mat5: fix int overflow in dataend calculation
+
+The clang sanitizer warns of a possible signed integer overflow when
+calculating the `dataend` value in `mat4_read_header()`.
+
+```
+src/mat4.c:323:41: runtime error: signed integer overflow: 205 * -100663296 cannot be represented in type 'int'
+SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior src/mat4.c:323:41 in
+src/mat4.c:323:48: runtime error: signed integer overflow: 838860800 * 4 cannot be represented in type 'int'
+SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior src/mat4.c:323:48 in
+```
+
+Cast the offending `rows` and `cols` ints to `sf_count_t` (the type of
+`dataend`) before performing the calculation, to avoid the issue.
+
+CVE: CVE-2022-33065
+Fixes: https://github.com/libsndfile/libsndfile/issues/789
+Fixes: https://github.com/libsndfile/libsndfile/issues/833
+
+Signed-off-by: Alex Stewart <alex.stewart@ni.com>
+
+Upstream-Status: Backport [https://github.com/libsndfile/libsndfile/commit/0754562e13d2e63a248a1c82f90b30bc0ffe307c]
+CVE: CVE-2022-33065
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/mat4.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/mat4.c b/src/mat4.c
+index 3c73680..e2f98b7 100644
+--- a/src/mat4.c
++++ b/src/mat4.c
+@@ -320,7 +320,7 @@ mat4_read_header (SF_PRIVATE *psf)
+ psf->filelength - psf->dataoffset, psf->sf.channels * psf->sf.frames * psf->bytewidth) ;
+ }
+ else if ((psf->filelength - psf->dataoffset) > psf->sf.channels * psf->sf.frames * psf->bytewidth)
+- psf->dataend = psf->dataoffset + rows * cols * psf->bytewidth ;
++ psf->dataend = psf->dataoffset + (sf_count_t) rows * (sf_count_t) cols * psf->bytewidth ;
+
+ psf->datalength = psf->filelength - psf->dataoffset - psf->dataend ;
+
+--
+2.40.1
diff --git a/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb b/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
index 2525af8fe0..fb7d94ab75 100644
--- a/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
+++ b/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
@@ -22,7 +22,9 @@ SRC_URI = "http://www.mega-nerd.com/libsndfile/files/libsndfile-${PV}.tar.gz \
file://CVE-2019-3832.patch \
file://CVE-2021-3246_1.patch \
file://CVE-2021-3246_2.patch \
- "
+ file://CVE-2022-33065.patch \
+ file://CVE-2021-4156.patch \
+ "
SRC_URI[md5sum] = "646b5f98ce89ac60cdb060fcd398247c"
SRC_URI[sha256sum] = "1ff33929f042fa333aed1e8923aa628c3ee9e1eb85512686c55092d1e5a9dfa9"
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0865.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0865.patch
new file mode 100644
index 0000000000..e2d136f587
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0865.patch
@@ -0,0 +1,39 @@
+From a1c933dabd0e1c54a412f3f84ae0aa58115c6067 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Thu, 24 Feb 2022 22:26:02 +0100
+Subject: [PATCH] tif_jbig.c: fix crash when reading a file with multiple IFD
+ in memory-mapped mode and when bit reversal is needed (fixes #385)
+
+CVE: CVE-2022-0865
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0865.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ libtiff/tif_jbig.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/libtiff/tif_jbig.c b/libtiff/tif_jbig.c
+index 74086338..8bfa4cef 100644
+--- a/libtiff/tif_jbig.c
++++ b/libtiff/tif_jbig.c
+@@ -208,6 +208,16 @@ int TIFFInitJBIG(TIFF* tif, int scheme)
+ */
+ tif->tif_flags |= TIFF_NOBITREV;
+ tif->tif_flags &= ~TIFF_MAPPED;
++ /* We may have read from a previous IFD and thus set TIFF_BUFFERMMAP and
++ * cleared TIFF_MYBUFFER. It is necessary to restore them to their initial
++ * value to be consistent with the state of a non-memory mapped file.
++ */
++ if (tif->tif_flags&TIFF_BUFFERMMAP) {
++ tif->tif_rawdata = NULL;
++ tif->tif_rawdatasize = 0;
++ tif->tif_flags &= ~TIFF_BUFFERMMAP;
++ tif->tif_flags |= TIFF_MYBUFFER;
++ }
+
+ /* Setup the function pointers for encode, decode, and cleanup. */
+ tif->tif_setupdecode = JBIGSetupDecode;
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0907.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0907.patch
new file mode 100644
index 0000000000..da3ead5481
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0907.patch
@@ -0,0 +1,94 @@
+From 40b00cfb32256d377608b4d4cd30fac338d0a0bc Mon Sep 17 00:00:00 2001
+From: Augustus <wangdw.augustus@qq.com>
+Date: Mon, 7 Mar 2022 18:21:49 +0800
+Subject: [PATCH] add checks for return value of limitMalloc (#392)
+
+CVE: CVE-2022-0907
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0907.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ tools/tiffcrop.c | 33 +++++++++++++++++++++------------
+ 1 file changed, 21 insertions(+), 12 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index f2e5474a..9b8acc7e 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -7337,7 +7337,11 @@ createImageSection(uint32_t sectsize, unsigned char **sect_buff_ptr)
+ if (!sect_buff)
+ {
+ sect_buff = (unsigned char *)_TIFFmalloc(sectsize);
+- *sect_buff_ptr = sect_buff;
++ if (!sect_buff)
++ {
++ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
++ return (-1);
++ }
+ _TIFFmemset(sect_buff, 0, sectsize);
+ }
+ else
+@@ -7353,15 +7357,15 @@ createImageSection(uint32_t sectsize, unsigned char **sect_buff_ptr)
+ else
+ sect_buff = new_buff;
+
++ if (!sect_buff)
++ {
++ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
++ return (-1);
++ }
+ _TIFFmemset(sect_buff, 0, sectsize);
+ }
+ }
+
+- if (!sect_buff)
+- {
+- TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
+- return (-1);
+- }
+ prev_sectsize = sectsize;
+ *sect_buff_ptr = sect_buff;
+
+@@ -7628,7 +7632,11 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (!crop_buff)
+ {
+ crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
+- *crop_buff_ptr = crop_buff;
++ if (!crop_buff)
++ {
++ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
++ return (-1);
++ }
+ _TIFFmemset(crop_buff, 0, cropsize);
+ prev_cropsize = cropsize;
+ }
+@@ -7644,15 +7652,15 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ }
+ else
+ crop_buff = new_buff;
++ if (!crop_buff)
++ {
++ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
++ return (-1);
++ }
+ _TIFFmemset(crop_buff, 0, cropsize);
+ }
+ }
+
+- if (!crop_buff)
+- {
+- TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
+- return (-1);
+- }
+ *crop_buff_ptr = crop_buff;
+
+ if (crop->crop_mode & CROP_INVERT)
+@@ -9211,3 +9219,4 @@ invertImage(uint16_t photometric, uint16_t spp, uint16_t bps, uint32_t width, ui
+ * fill-column: 78
+ * End:
+ */
++
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0908.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0908.patch
new file mode 100644
index 0000000000..e65af6c600
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0908.patch
@@ -0,0 +1,34 @@
+From a95b799f65064e4ba2e2dfc206808f86faf93e85 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Thu, 17 Feb 2022 15:28:43 +0100
+Subject: [PATCH] TIFFFetchNormalTag(): avoid calling memcpy() with a null
+ source pointer and size of zero (fixes #383)
+
+CVE: CVE-2022-0908
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0908.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ libtiff/tif_dirread.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
+index 50ebf8ac..2ec44a4f 100644
+--- a/libtiff/tif_dirread.c
++++ b/libtiff/tif_dirread.c
+@@ -5021,7 +5021,10 @@ TIFFFetchNormalTag(TIFF* tif, TIFFDirEntry* dp, int recover)
+ _TIFFfree(data);
+ return(0);
+ }
+- _TIFFmemcpy(o,data,(uint32)dp->tdir_count);
++ if (dp->tdir_count > 0 )
++ {
++ _TIFFmemcpy(o,data,(uint32)dp->tdir_count);
++ }
+ o[(uint32)dp->tdir_count]=0;
+ if (data!=0)
+ _TIFFfree(data);
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0909.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0909.patch
new file mode 100644
index 0000000000..d487f1bd95
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0909.patch
@@ -0,0 +1,37 @@
+From 32ea0722ee68f503b7a3f9b2d557acb293fc8cde Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Tue, 8 Mar 2022 16:22:04 +0000
+Subject: [PATCH] fix the FPE in tiffcrop (#393)
+
+CVE: CVE-2022-0909
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0909.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ libtiff/tif_dir.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 57055ca9..59b346ca 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -334,13 +334,13 @@ _TIFFVSetField(TIFF* tif, uint32_t tag, va_list ap)
+ break;
+ case TIFFTAG_XRESOLUTION:
+ dblval = va_arg(ap, double);
+- if( dblval < 0 )
++ if( dblval != dblval || dblval < 0 )
+ goto badvaluedouble;
+ td->td_xresolution = _TIFFClampDoubleToFloat( dblval );
+ break;
+ case TIFFTAG_YRESOLUTION:
+ dblval = va_arg(ap, double);
+- if( dblval < 0 )
++ if( dblval != dblval || dblval < 0 )
+ goto badvaluedouble;
+ td->td_yresolution = _TIFFClampDoubleToFloat( dblval );
+ break;
+--
+GitLab
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-0924.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-0924.patch
new file mode 100644
index 0000000000..ddb035c972
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-0924.patch
@@ -0,0 +1,58 @@
+From 88d79a45a31c74cba98c697892fed5f7db8b963a Mon Sep 17 00:00:00 2001
+From: 4ugustus <wangdw.augustus@qq.com>
+Date: Thu, 10 Mar 2022 08:48:00 +0000
+Subject: [PATCH] fix heap buffer overflow in tiffcp (#278)
+
+CVE: CVE-2022-0924
+Upstream-Status: Backport [https://sources.debian.org/src/tiff/4.1.0+git191117-2%7Edeb10u4/debian/patches/CVE-2022-0924.patch/]
+Signed-off-by: Ranjitsinh Rathod <ranjitsinh.rathod@kpit.com>
+Comment: No change in any hunk
+
+---
+ tools/tiffcp.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 224583e0..aa32b118 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -1524,12 +1524,27 @@ DECLAREwriteFunc(writeBufferToSeparateSt
+ tdata_t obuf;
+ tstrip_t strip = 0;
+ tsample_t s;
++ uint16 bps = 0, bytes_per_sample;
+
+ obuf = _TIFFmalloc(stripsize);
+ if (obuf == NULL)
+ return (0);
+ _TIFFmemset(obuf, 0, stripsize);
+ (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip);
++ (void) TIFFGetField(out, TIFFTAG_BITSPERSAMPLE, &bps);
++ if( bps == 0 )
++ {
++ TIFFError(TIFFFileName(out), "Error, cannot read BitsPerSample");
++ _TIFFfree(obuf);
++ return 0;
++ }
++ if( (bps % 8) != 0 )
++ {
++ TIFFError(TIFFFileName(out), "Error, cannot handle BitsPerSample that is not a multiple of 8");
++ _TIFFfree(obuf);
++ return 0;
++ }
++ bytes_per_sample = bps/8;
+ for (s = 0; s < spp; s++) {
+ uint32 row;
+ for (row = 0; row < imagelength; row += rowsperstrip) {
+@@ -1539,7 +1539,7 @@ DECLAREwriteFunc(writeBufferToSeparateSt
+
+ cpContigBufToSeparateBuf(
+ obuf, (uint8*) buf + row*rowsize + s,
+- nrows, imagewidth, 0, 0, spp, 1);
++ nrows, imagewidth, 0, 0, spp, bytes_per_sample);
+ if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) {
+ TIFFError(TIFFFileName(out),
+ "Error, can't write strip %u",
+--
+GitLab
+
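The tiffcp hunk is a reminder that a value read from file metadata (BitsPerSample here) has to be validated before it becomes a byte stride. A small sketch in the same spirit; the helper below is hypothetical and not part of libtiff:

#include <stdint.h>

/* Turn a BitsPerSample value read from a TIFF directory into a byte
 * stride.  Returns 0 on values the strip copier cannot handle and
 * leaves *bytes_per_sample untouched in that case. */
static int bps_to_bytes(uint16_t bps, uint16_t *bytes_per_sample)
{
    if (bps == 0)
        return 0;        /* tag missing or corrupt */
    if (bps % 8 != 0)
        return 0;        /* sub-byte samples need a different code path */
    *bytes_per_sample = bps / 8;
    return 1;
}
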
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch
new file mode 100644
index 0000000000..01e81349a2
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch
@@ -0,0 +1,183 @@
+From 8261237113a53cd21029c4a8cbb62c47b4c19523 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Wed, 27 Jul 2022 11:30:18 +0530
+Subject: [PATCH] CVE-2022-2056 CVE-2022-2057 CVE-2022-2058
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/dd1bcc7abb26094e93636e85520f0d8f81ab0fab]
+CVE: CVE-2022-2056 CVE-2022-2057 CVE-2022-2058
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_aux.c | 9 +++++++
+ libtiff/tiffiop.h | 1 +
+ tools/tiffcrop.c | 62 ++++++++++++++++++++++++++---------------------
+ 3 files changed, 44 insertions(+), 28 deletions(-)
+
+diff --git a/libtiff/tif_aux.c b/libtiff/tif_aux.c
+index 8188db5..3dac542 100644
+--- a/libtiff/tif_aux.c
++++ b/libtiff/tif_aux.c
+@@ -402,6 +402,15 @@ float _TIFFClampDoubleToFloat( double val )
+ return (float)val;
+ }
+
++uint32 _TIFFClampDoubleToUInt32(double val)
++{
++ if( val < 0 )
++ return 0;
++ if( val > 0xFFFFFFFFU || val != val )
++ return 0xFFFFFFFFU;
++ return (uint32)val;
++}
++
+ int _TIFFSeekOK(TIFF* tif, toff_t off)
+ {
+ /* Huge offsets, especially -1 / UINT64_MAX, can cause issues */
+diff --git a/libtiff/tiffiop.h b/libtiff/tiffiop.h
+index 45a7932..c6f6f93 100644
+--- a/libtiff/tiffiop.h
++++ b/libtiff/tiffiop.h
+@@ -393,6 +393,7 @@ extern double _TIFFUInt64ToDouble(uint64);
+ extern float _TIFFUInt64ToFloat(uint64);
+
+ extern float _TIFFClampDoubleToFloat(double);
++extern uint32 _TIFFClampDoubleToUInt32(double);
+
+ extern tmsize_t
+ _TIFFReadEncodedStripAndAllocBuffer(TIFF* tif, uint32 strip,
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index c2c2052..79dd0a0 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5141,17 +5141,17 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ {
+ if ((crop->res_unit == RESUNIT_INCH) || (crop->res_unit == RESUNIT_CENTIMETER))
+ {
+- x1 = (uint32) (crop->corners[i].X1 * scale * xres);
+- x2 = (uint32) (crop->corners[i].X2 * scale * xres);
+- y1 = (uint32) (crop->corners[i].Y1 * scale * yres);
+- y2 = (uint32) (crop->corners[i].Y2 * scale * yres);
++ x1 = _TIFFClampDoubleToUInt32(crop->corners[i].X1 * scale * xres);
++ x2 = _TIFFClampDoubleToUInt32(crop->corners[i].X2 * scale * xres);
++ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1 * scale * yres);
++ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2 * scale * yres);
+ }
+ else
+ {
+- x1 = (uint32) (crop->corners[i].X1);
+- x2 = (uint32) (crop->corners[i].X2);
+- y1 = (uint32) (crop->corners[i].Y1);
+- y2 = (uint32) (crop->corners[i].Y2);
++ x1 = _TIFFClampDoubleToUInt32(crop->corners[i].X1);
++ x2 = _TIFFClampDoubleToUInt32(crop->corners[i].X2);
++ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1);
++ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2);
+ }
+ if (x1 < 1)
+ crop->regionlist[i].x1 = 0;
+@@ -5214,17 +5214,17 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ {
+ if (crop->res_unit != RESUNIT_INCH && crop->res_unit != RESUNIT_CENTIMETER)
+ { /* User has specified pixels as reference unit */
+- tmargin = (uint32)(crop->margins[0]);
+- lmargin = (uint32)(crop->margins[1]);
+- bmargin = (uint32)(crop->margins[2]);
+- rmargin = (uint32)(crop->margins[3]);
++ tmargin = _TIFFClampDoubleToUInt32(crop->margins[0]);
++ lmargin = _TIFFClampDoubleToUInt32(crop->margins[1]);
++ bmargin = _TIFFClampDoubleToUInt32(crop->margins[2]);
++ rmargin = _TIFFClampDoubleToUInt32(crop->margins[3]);
+ }
+ else
+ { /* inches or centimeters specified */
+- tmargin = (uint32)(crop->margins[0] * scale * yres);
+- lmargin = (uint32)(crop->margins[1] * scale * xres);
+- bmargin = (uint32)(crop->margins[2] * scale * yres);
+- rmargin = (uint32)(crop->margins[3] * scale * xres);
++ tmargin = _TIFFClampDoubleToUInt32(crop->margins[0] * scale * yres);
++ lmargin = _TIFFClampDoubleToUInt32(crop->margins[1] * scale * xres);
++ bmargin = _TIFFClampDoubleToUInt32(crop->margins[2] * scale * yres);
++ rmargin = _TIFFClampDoubleToUInt32(crop->margins[3] * scale * xres);
+ }
+
+ if ((lmargin + rmargin) > image->width)
+@@ -5254,24 +5254,24 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+ if (crop->res_unit != RESUNIT_INCH && crop->res_unit != RESUNIT_CENTIMETER)
+ {
+ if (crop->crop_mode & CROP_WIDTH)
+- width = (uint32)crop->width;
++ width = _TIFFClampDoubleToUInt32(crop->width);
+ else
+ width = image->width - lmargin - rmargin;
+
+ if (crop->crop_mode & CROP_LENGTH)
+- length = (uint32)crop->length;
++ length = _TIFFClampDoubleToUInt32(crop->length);
+ else
+ length = image->length - tmargin - bmargin;
+ }
+ else
+ {
+ if (crop->crop_mode & CROP_WIDTH)
+- width = (uint32)(crop->width * scale * image->xres);
++ width = _TIFFClampDoubleToUInt32(crop->width * scale * image->xres);
+ else
+ width = image->width - lmargin - rmargin;
+
+ if (crop->crop_mode & CROP_LENGTH)
+- length = (uint32)(crop->length * scale * image->yres);
++ length = _TIFFClampDoubleToUInt32(crop->length * scale * image->yres);
+ else
+ length = image->length - tmargin - bmargin;
+ }
+@@ -5670,13 +5670,13 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ {
+ if (page->res_unit == RESUNIT_INCH || page->res_unit == RESUNIT_CENTIMETER)
+ { /* inches or centimeters specified */
+- hmargin = (uint32)(page->hmargin * scale * page->hres * ((image->bps + 7)/ 8));
+- vmargin = (uint32)(page->vmargin * scale * page->vres * ((image->bps + 7)/ 8));
++ hmargin = _TIFFClampDoubleToUInt32(page->hmargin * scale * page->hres * ((image->bps + 7) / 8));
++ vmargin = _TIFFClampDoubleToUInt32(page->vmargin * scale * page->vres * ((image->bps + 7) / 8));
+ }
+ else
+ { /* Otherwise user has specified pixels as reference unit */
+- hmargin = (uint32)(page->hmargin * scale * ((image->bps + 7)/ 8));
+- vmargin = (uint32)(page->vmargin * scale * ((image->bps + 7)/ 8));
++ hmargin = _TIFFClampDoubleToUInt32(page->hmargin * scale * ((image->bps + 7) / 8));
++ vmargin = _TIFFClampDoubleToUInt32(page->vmargin * scale * ((image->bps + 7) / 8));
+ }
+
+ if ((hmargin * 2.0) > (pwidth * page->hres))
+@@ -5714,13 +5714,13 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ {
+ if (page->mode & PAGE_MODE_PAPERSIZE )
+ {
+- owidth = (uint32)((pwidth * page->hres) - (hmargin * 2));
+- olength = (uint32)((plength * page->vres) - (vmargin * 2));
++ owidth = _TIFFClampDoubleToUInt32((pwidth * page->hres) - (hmargin * 2));
++ olength = _TIFFClampDoubleToUInt32((plength * page->vres) - (vmargin * 2));
+ }
+ else
+ {
+- owidth = (uint32)(iwidth - (hmargin * 2 * page->hres));
+- olength = (uint32)(ilength - (vmargin * 2 * page->vres));
++ owidth = _TIFFClampDoubleToUInt32(iwidth - (hmargin * 2 * page->hres));
++ olength = _TIFFClampDoubleToUInt32(ilength - (vmargin * 2 * page->vres));
+ }
+ }
+
+@@ -5729,6 +5729,12 @@ computeOutputPixelOffsets (struct crop_mask *crop, struct image_data *image,
+ if (olength > ilength)
+ olength = ilength;
+
++ if (owidth == 0 || olength == 0)
++ {
++ TIFFError("computeOutputPixelOffsets", "Integer overflow when calculating the number of pages");
++ exit(EXIT_FAILURE);
++ }
++
+ /* Compute the number of pages required for Portrait or Landscape */
+ switch (page->orient)
+ {
+--
+2.25.1
+
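Most of the tiffcrop hunks above replace bare (uint32) casts of user-controlled doubles with _TIFFClampDoubleToUInt32(), which saturates instead of wrapping and treats NaN as out of range. A standalone sketch of the same idea; the name below is illustrative:

#include <stdint.h>

/* Convert a double to uint32_t, saturating at the range limits.
 * NaN compares unequal to itself, so "val != val" routes it to the
 * upper clamp, matching the libtiff helper's behaviour. */
static uint32_t clamp_double_to_u32(double val)
{
    if (val < 0.0)
        return 0;
    if (val > (double)UINT32_MAX || val != val)
        return UINT32_MAX;
    return (uint32_t)val;
}
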
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch
new file mode 100644
index 0000000000..131ff94119
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch
@@ -0,0 +1,159 @@
+From 07d79fcac2ead271b60e32aeb80f7b4f3be9ac8c Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Wed, 9 Feb 2022 21:31:29 +0000
+Subject: [PATCH] tiffcrop.c: Fix issue #352 heap-buffer-overflow by correcting
+ uint32_t underflow.
+
+CVE: CVE-2022-2867 CVE-2022-2868 CVE-2022-2869
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/07d79fcac2ead271b60e32aeb80f7b4f3be9ac8c]
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+---
+Index: tiff-4.1.0/tools/tiffcrop.c
+===================================================================
+--- tiff-4.1.0.orig/tools/tiffcrop.c
++++ tiff-4.1.0/tools/tiffcrop.c
+@@ -5153,29 +5153,45 @@ computeInputPixelOffsets(struct crop_mas
+ y1 = _TIFFClampDoubleToUInt32(crop->corners[i].Y1);
+ y2 = _TIFFClampDoubleToUInt32(crop->corners[i].Y2);
+ }
+- if (x1 < 1)
+- crop->regionlist[i].x1 = 0;
+- else
+- crop->regionlist[i].x1 = (uint32) (x1 - 1);
++ /* a) Region needs to be within image sizes 0.. width-1; 0..length-1
++ * b) Corners are expected to be submitted as top-left to bottom-right.
++ * Therefore, check that and reorder input.
++ * (be aware x,y are already casted to (uint32_t) and avoid (0 - 1) )
++ */
++ uint32_t aux;
++ if (x1 > x2) {
++ aux = x1;
++ x1 = x2;
++ x2 = aux;
++ }
++ if (y1 > y2) {
++ aux = y1;
++ y1 = y2;
++ y2 = aux;
++ }
++ if (x1 > image->width - 1)
++ crop->regionlist[i].x1 = image->width - 1;
++ else if (x1 > 0)
++ crop->regionlist[i].x1 = (uint32_t)(x1 - 1);
+
+ if (x2 > image->width - 1)
+ crop->regionlist[i].x2 = image->width - 1;
+- else
+- crop->regionlist[i].x2 = (uint32) (x2 - 1);
+- zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
+-
+- if (y1 < 1)
+- crop->regionlist[i].y1 = 0;
+- else
+- crop->regionlist[i].y1 = (uint32) (y1 - 1);
++ else if (x2 > 0)
++ crop->regionlist[i].x2 = (uint32_t)(x2 - 1);
++
++ zwidth = crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
++
++ if (y1 > image->length - 1)
++ crop->regionlist[i].y1 = image->length - 1;
++ else if (y1 > 0)
++ crop->regionlist[i].y1 = (uint32_t)(y1 - 1);
+
+ if (y2 > image->length - 1)
+ crop->regionlist[i].y2 = image->length - 1;
+- else
+- crop->regionlist[i].y2 = (uint32) (y2 - 1);
+-
+- zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
++ else if (y2 > 0)
++ crop->regionlist[i].y2 = (uint32_t)(y2 - 1);
+
++ zlength = crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
+ if (zwidth > max_width)
+ max_width = zwidth;
+ if (zlength > max_length)
+@@ -5205,7 +5221,7 @@ computeInputPixelOffsets(struct crop_mas
+ }
+ }
+ return (0);
+- }
++ } /* crop_mode == CROP_REGIONS */
+
+ /* Convert crop margins into offsets into image
+ * Margins are expressed as pixel rows and columns, not bytes
+@@ -5241,7 +5257,7 @@ computeInputPixelOffsets(struct crop_mas
+ bmargin = (uint32) 0;
+ return (-1);
+ }
+- }
++ } /* crop_mode == CROP_MARGINS */
+ else
+ { /* no margins requested */
+ tmargin = (uint32) 0;
+@@ -5332,24 +5348,23 @@ computeInputPixelOffsets(struct crop_mas
+ off->endx = endx;
+ off->endy = endy;
+
+- crop_width = endx - startx + 1;
+- crop_length = endy - starty + 1;
+-
+- if (crop_width <= 0)
++ if (endx + 1 <= startx)
+ {
+ TIFFError("computeInputPixelOffsets",
+ "Invalid left/right margins and /or image crop width requested");
+ return (-1);
+ }
++ crop_width = endx - startx + 1;
+ if (crop_width > image->width)
+ crop_width = image->width;
+
+- if (crop_length <= 0)
++ if (endy + 1 <= starty)
+ {
+ TIFFError("computeInputPixelOffsets",
+ "Invalid top/bottom margins and /or image crop length requested");
+ return (-1);
+ }
++ crop_length = endy - starty + 1;
+ if (crop_length > image->length)
+ crop_length = image->length;
+
+@@ -5449,10 +5464,17 @@ getCropOffsets(struct image_data *image,
+ else
+ crop->selections = crop->zones;
+
+- for (i = 0; i < crop->zones; i++)
++ /* Initialize regions iterator i */
++ i = 0;
++ for (int j = 0; j < crop->zones; j++)
+ {
+- seg = crop->zonelist[i].position;
+- total = crop->zonelist[i].total;
++ seg = crop->zonelist[j].position;
++ total = crop->zonelist[j].total;
++
++ /* check for not allowed zone cases like 0:0; 4:3; etc. and skip that input */
++ if (seg == 0 || total == 0 || seg > total) {
++ continue;
++ }
+
+ switch (crop->edge_ref)
+ {
+@@ -5581,8 +5603,11 @@ getCropOffsets(struct image_data *image,
+ i + 1, (uint32)zwidth, (uint32)zlength,
+ crop->regionlist[i].x1, crop->regionlist[i].x2,
+ crop->regionlist[i].y1, crop->regionlist[i].y2);
++ /* increment regions iterator */
++ i++;
+ }
+-
++ /* set number of generated regions out of given zones */
++ crop->selections = i;
+ return (0);
+ } /* end getCropOffsets */
+
+--
+GitLab
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-34526.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-34526.patch
new file mode 100644
index 0000000000..cf440ce55f
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-34526.patch
@@ -0,0 +1,29 @@
+From 06386cc9dff5dc162006abe11fd4d1a6fad616cc Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Thu, 18 Aug 2022 09:40:50 +0530
+Subject: [PATCH] CVE-2022-34526
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/275735d0354e39c0ac1dc3c0db2120d6f31d1990]
+CVE: CVE-2022-34526
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_dirinfo.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/libtiff/tif_dirinfo.c b/libtiff/tif_dirinfo.c
+index 52d53d4..4a1ca00 100644
+--- a/libtiff/tif_dirinfo.c
++++ b/libtiff/tif_dirinfo.c
+@@ -983,6 +983,9 @@ _TIFFCheckFieldIsValidForCodec(TIFF *tif, ttag_t tag)
+ default:
+ return 1;
+ }
++ if( !TIFFIsCODECConfigured(tif->tif_dir.td_compression) ) {
++ return 0;
++ }
+ /* Check if codec specific tags are allowed for the current
+ * compression scheme (codec) */
+ switch (tif->tif_dir.td_compression) {
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-3570_3598.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-3570_3598.patch
new file mode 100644
index 0000000000..760e20dd2b
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-3570_3598.patch
@@ -0,0 +1,659 @@
+From 226e336cdceec933da2e9f72b6578c7a1bea450b Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Thu, 13 Oct 2022 14:33:27 +0000
+Subject: [PATCH] tiffcrop subroutines require a larger buffer (fixes #271,
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2022-3570 CVE-2022-3598
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/cfbb883bf6ea7bedcb04177cc4e52d304522fdff
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/24d3b2425af24432e0e4e2fd58b33f3b04c4bfa4
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-01-17
+
+ #381, #386, #388, #389, #435)
+
+---
+ tools/tiffcrop.c | 209 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 117 insertions(+), 92 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index c7877aa..c923920 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -126,6 +126,7 @@ static char tiffcrop_rev_date[] = "03-03-2010";
+
+ #ifdef HAVE_STDINT_H
+ # include <stdint.h>
++# include <inttypes.h>
+ #endif
+
+ #ifndef HAVE_GETOPT
+@@ -212,6 +213,10 @@ extern int getopt(int argc, char * const argv[], const char *optstring);
+
+ #define TIFF_DIR_MAX 65534
+
++/* Some conversion subroutines require image buffers, which are at least 3 bytes
++ * larger than the necessary size for the image itself. */
++#define NUM_BUFF_OVERSIZE_BYTES 3
++
+ /* Offsets into buffer for margins and fixed width and length segments */
+ struct offset {
+ uint32 tmargin;
+@@ -233,7 +238,7 @@ struct offset {
+ */
+
+ struct buffinfo {
+- uint32 size; /* size of this buffer */
++ size_t size; /* size of this buffer */
+ unsigned char *buffer; /* address of the allocated buffer */
+ };
+
+@@ -771,8 +776,8 @@ static int readContigTilesIntoBuffer (TIFF* in, uint8* buf,
+ uint32 dst_rowsize, shift_width;
+ uint32 bytes_per_sample, bytes_per_pixel;
+ uint32 trailing_bits, prev_trailing_bits;
+- uint32 tile_rowsize = TIFFTileRowSize(in);
+- uint32 src_offset, dst_offset;
++ tmsize_t tile_rowsize = TIFFTileRowSize(in);
++ tmsize_t src_offset, dst_offset;
+ uint32 row_offset, col_offset;
+ uint8 *bufp = (uint8*) buf;
+ unsigned char *src = NULL;
+@@ -822,7 +827,7 @@ static int readContigTilesIntoBuffer (TIFF* in, uint8* buf,
+ TIFFError("readContigTilesIntoBuffer", "Integer overflow when calculating buffer size.");
+ exit(-1);
+ }
+- tilebuf = _TIFFmalloc(tile_buffsize + 3);
++ tilebuf = _TIFFmalloc(tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (tilebuf == 0)
+ return 0;
+ tilebuf[tile_buffsize] = 0;
+@@ -986,7 +991,7 @@ static int readSeparateTilesIntoBuffer (TIFF* in, uint8 *obuf,
+ for (sample = 0; (sample < spp) && (sample < MAX_SAMPLES); sample++)
+ {
+ srcbuffs[sample] = NULL;
+- tbuff = (unsigned char *)_TIFFmalloc(tilesize + 8);
++ tbuff = (unsigned char *)_TIFFmalloc(tilesize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!tbuff)
+ {
+ TIFFError ("readSeparateTilesIntoBuffer",
+@@ -1181,7 +1186,8 @@ writeBufferToSeparateStrips (TIFF* out, uint8* buf,
+ }
+ rowstripsize = rowsperstrip * bytes_per_sample * (width + 1);
+
+- obuf = _TIFFmalloc (rowstripsize);
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ obuf = _TIFFmalloc (rowstripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (obuf == NULL)
+ return 1;
+
+@@ -1194,7 +1200,7 @@ writeBufferToSeparateStrips (TIFF* out, uint8* buf,
+ stripsize = TIFFVStripSize(out, nrows);
+ src = buf + (row * rowsize);
+ total_bytes += stripsize;
+- memset (obuf, '\0', rowstripsize);
++ memset (obuf, '\0',rowstripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (extractContigSamplesToBuffer(obuf, src, nrows, width, s, spp, bps, dump))
+ {
+ _TIFFfree(obuf);
+@@ -1202,10 +1208,15 @@ writeBufferToSeparateStrips (TIFF* out, uint8* buf,
+ }
+ if ((dump->outfile != NULL) && (dump->level == 1))
+ {
+- dump_info(dump->outfile, dump->format,"",
++ if ((uint64_t)scanlinesize > 0x0ffffffffULL) {
++ dump_info(dump->infile, dump->format, "loadImage",
++ "Attention: scanlinesize %"PRIu64" is larger than UINT32_MAX.\nFollowing dump might be wrong.",
++ (uint64_t)scanlinesize);
++ }
++ dump_info(dump->outfile, dump->format,"",
+ "Sample %2d, Strip: %2d, bytes: %4d, Row %4d, bytes: %4d, Input offset: %6d",
+- s + 1, strip + 1, stripsize, row + 1, scanlinesize, src - buf);
+- dump_buffer(dump->outfile, dump->format, nrows, scanlinesize, row, obuf);
++ s + 1, strip + 1, stripsize, row + 1, (uint32)scanlinesize, src - buf);
++ dump_buffer(dump->outfile, dump->format, nrows, (uint32)scanlinesize, row, obuf);
+ }
+
+ if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0)
+@@ -1232,7 +1243,7 @@ static int writeBufferToContigTiles (TIFF* out, uint8* buf, uint32 imagelength,
+ uint32 tl, tw;
+ uint32 row, col, nrow, ncol;
+ uint32 src_rowsize, col_offset;
+- uint32 tile_rowsize = TIFFTileRowSize(out);
++ tmsize_t tile_rowsize = TIFFTileRowSize(out);
+ uint8* bufp = (uint8*) buf;
+ tsize_t tile_buffsize = 0;
+ tsize_t tilesize = TIFFTileSize(out);
+@@ -1275,9 +1286,11 @@ static int writeBufferToContigTiles (TIFF* out, uint8* buf, uint32 imagelength,
+ }
+ src_rowsize = ((imagewidth * spp * bps) + 7U) / 8;
+
+- tilebuf = _TIFFmalloc(tile_buffsize);
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ tilebuf = _TIFFmalloc(tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (tilebuf == 0)
+ return 1;
++ memset(tilebuf, 0, tile_buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ for (row = 0; row < imagelength; row += tl)
+ {
+ nrow = (row + tl > imagelength) ? imagelength - row : tl;
+@@ -1323,7 +1336,8 @@ static int writeBufferToSeparateTiles (TIFF* out, uint8* buf, uint32 imagelength
+ uint32 imagewidth, tsample_t spp,
+ struct dump_opts * dump)
+ {
+- tdata_t obuf = _TIFFmalloc(TIFFTileSize(out));
++ /* Add 3 padding bytes for extractContigSamples32bits */
++ tdata_t obuf = _TIFFmalloc(TIFFTileSize(out) + NUM_BUFF_OVERSIZE_BYTES);
+ uint32 tl, tw;
+ uint32 row, col, nrow, ncol;
+ uint32 src_rowsize, col_offset;
+@@ -1333,6 +1347,7 @@ static int writeBufferToSeparateTiles (TIFF* out, uint8* buf, uint32 imagelength
+
+ if (obuf == NULL)
+ return 1;
++ memset(obuf, 0, TIFFTileSize(out) + NUM_BUFF_OVERSIZE_BYTES);
+
+ TIFFGetField(out, TIFFTAG_TILELENGTH, &tl);
+ TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw);
+@@ -1754,14 +1769,14 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+
+ *opt_offset = '\0';
+ /* convert option to lowercase */
+- end = strlen (opt_ptr);
++ end = (unsigned int)strlen (opt_ptr);
+ for (i = 0; i < end; i++)
+ *(opt_ptr + i) = tolower((int) *(opt_ptr + i));
+ /* Look for dump format specification */
+ if (strncmp(opt_ptr, "for", 3) == 0)
+ {
+ /* convert value to lowercase */
+- end = strlen (opt_offset + 1);
++ end = (unsigned int)strlen (opt_offset + 1);
+ for (i = 1; i <= end; i++)
+ *(opt_offset + i) = tolower((int) *(opt_offset + i));
+ /* check dump format value */
+@@ -2213,6 +2228,8 @@ main(int argc, char* argv[])
+ size_t length;
+ char temp_filename[PATH_MAX + 16]; /* Extra space keeps the compiler from complaining */
+
++ assert(NUM_BUFF_OVERSIZE_BYTES >= 3);
++
+ little_endian = *((unsigned char *)&little_endian) & '1';
+
+ initImageData(&image);
+@@ -3114,13 +3131,13 @@ extractContigSamples32bits (uint8 *in, uint8 *out, uint32 cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -3495,13 +3512,13 @@ extractContigSamplesShifted32bits (uint8 *in, uint8 *out, uint32 cols,
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -3678,10 +3695,10 @@ extractContigSamplesToTileBuffer(uint8 *out, uint8 *in, uint32 rows, uint32 cols
+ static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
+ {
+ uint8* bufp = buf;
+- int32 bytes_read = 0;
++ tmsize_t bytes_read = 0;
+ uint32 strip, nstrips = TIFFNumberOfStrips(in);
+- uint32 stripsize = TIFFStripSize(in);
+- uint32 rows = 0;
++ tmsize_t stripsize = TIFFStripSize(in);
++ tmsize_t rows = 0;
+ uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps);
+ tsize_t scanline_size = TIFFScanlineSize(in);
+
+@@ -3694,13 +3711,12 @@ static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
+ bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1);
+ rows = bytes_read / scanline_size;
+ if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize))
+- TIFFError("", "Strip %d: read %lu bytes, strip size %lu",
+- (int)strip + 1, (unsigned long) bytes_read,
+- (unsigned long)stripsize);
++ TIFFError("", "Strip %"PRIu32": read %"PRId64" bytes, strip size %"PRIu64,
++ strip + 1, bytes_read, stripsize);
+
+ if (bytes_read < 0 && !ignore) {
+- TIFFError("", "Error reading strip %lu after %lu rows",
+- (unsigned long) strip, (unsigned long)rows);
++ TIFFError("", "Error reading strip %"PRIu32" after %"PRIu64" rows",
++ strip, rows);
+ return 0;
+ }
+ bufp += stripsize;
+@@ -4164,13 +4180,13 @@ combineSeparateSamples32bits (uint8 *in[], uint8 *out, uint32 cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -4213,10 +4229,10 @@ combineSeparateSamples32bits (uint8 *in[], uint8 *out, uint32 cols,
+ "Row %3d, Col %3d, Src byte offset %3d bit offset %2d Dst offset %3d",
+ row + 1, col + 1, src_byte, src_bit, dst - out);
+
+- dump_long (dumpfile, format, "Match bits ", matchbits);
++ dump_wide (dumpfile, format, "Match bits ", matchbits);
+ dump_data (dumpfile, format, "Src bits ", src, 4);
+- dump_long (dumpfile, format, "Buff1 bits ", buff1);
+- dump_long (dumpfile, format, "Buff2 bits ", buff2);
++ dump_wide (dumpfile, format, "Buff1 bits ", buff1);
++ dump_wide (dumpfile, format, "Buff2 bits ", buff2);
+ dump_byte (dumpfile, format, "Write bits1", bytebuff1);
+ dump_byte (dumpfile, format, "Write bits2", bytebuff2);
+ dump_info (dumpfile, format, "", "Ready bits: %2d", ready_bits);
+@@ -4689,13 +4705,13 @@ combineSeparateTileSamples32bits (uint8 *in[], uint8 *out, uint32 cols,
+ /* If we have a full buffer's worth, write it out */
+ if (ready_bits >= 32)
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -4738,10 +4754,10 @@ combineSeparateTileSamples32bits (uint8 *in[], uint8 *out, uint32 cols,
+ "Row %3d, Col %3d, Src byte offset %3d bit offset %2d Dst offset %3d",
+ row + 1, col + 1, src_byte, src_bit, dst - out);
+
+- dump_long (dumpfile, format, "Match bits ", matchbits);
++ dump_wide (dumpfile, format, "Match bits ", matchbits);
+ dump_data (dumpfile, format, "Src bits ", src, 4);
+- dump_long (dumpfile, format, "Buff1 bits ", buff1);
+- dump_long (dumpfile, format, "Buff2 bits ", buff2);
++ dump_wide (dumpfile, format, "Buff1 bits ", buff1);
++ dump_wide (dumpfile, format, "Buff2 bits ", buff2);
+ dump_byte (dumpfile, format, "Write bits1", bytebuff1);
+ dump_byte (dumpfile, format, "Write bits2", bytebuff2);
+ dump_info (dumpfile, format, "", "Ready bits: %2d", ready_bits);
+@@ -4764,7 +4780,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8 *obuf, uint32 length,
+ {
+ int i, bytes_per_sample, bytes_per_pixel, shift_width, result = 1;
+ uint32 j;
+- int32 bytes_read = 0;
++ tmsize_t bytes_read = 0;
+ uint16 bps = 0, planar;
+ uint32 nstrips;
+ uint32 strips_per_sample;
+@@ -4830,7 +4846,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8 *obuf, uint32 length,
+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++)
+ {
+ srcbuffs[s] = NULL;
+- buff = _TIFFmalloc(stripsize + 3);
++ buff = _TIFFmalloc(stripsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!buff)
+ {
+ TIFFError ("readSeparateStripsIntoBuffer",
+@@ -4853,7 +4869,7 @@ static int readSeparateStripsIntoBuffer (TIFF *in, uint8 *obuf, uint32 length,
+ buff = srcbuffs[s];
+ strip = (s * strips_per_sample) + j;
+ bytes_read = TIFFReadEncodedStrip (in, strip, buff, stripsize);
+- rows_this_strip = bytes_read / src_rowsize;
++ rows_this_strip = (uint32)(bytes_read / src_rowsize);
+ if (bytes_read < 0 && !ignore)
+ {
+ TIFFError(TIFFFileName(in),
+@@ -5860,13 +5876,14 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ uint16 input_compression = 0, input_photometric = 0;
+ uint16 subsampling_horiz, subsampling_vert;
+ uint32 width = 0, length = 0;
+- uint32 stsize = 0, tlsize = 0, buffsize = 0, scanlinesize = 0;
++ tmsize_t stsize = 0, tlsize = 0, buffsize = 0;
++ tmsize_t scanlinesize = 0;
+ uint32 tw = 0, tl = 0; /* Tile width and length */
+- uint32 tile_rowsize = 0;
++ tmsize_t tile_rowsize = 0;
+ unsigned char *read_buff = NULL;
+ unsigned char *new_buff = NULL;
+ int readunit = 0;
+- static uint32 prev_readsize = 0;
++ static tmsize_t prev_readsize = 0;
+
+ TIFFGetFieldDefaulted(in, TIFFTAG_BITSPERSAMPLE, &bps);
+ TIFFGetFieldDefaulted(in, TIFFTAG_SAMPLESPERPIXEL, &spp);
+@@ -6168,7 +6185,7 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+ return (-1);
+ }
+- read_buff = (unsigned char *)_TIFFmalloc(buffsize+3);
++ read_buff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ {
+@@ -6179,11 +6196,11 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+ return (-1);
+ }
+- new_buff = _TIFFrealloc(read_buff, buffsize+3);
++ new_buff = _TIFFrealloc(read_buff, buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ free (read_buff);
+- read_buff = (unsigned char *)_TIFFmalloc(buffsize+3);
++ read_buff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ read_buff = new_buff;
+@@ -6256,8 +6273,13 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ dump_info (dump->infile, dump->format, "",
+ "Bits per sample %d, Samples per pixel %d", bps, spp);
+
++ if ((uint64_t)scanlinesize > 0x0ffffffffULL) {
++ dump_info(dump->infile, dump->format, "loadImage",
++ "Attention: scanlinesize %"PRIu64" is larger than UINT32_MAX.\nFollowing dump might be wrong.",
++ (uint64_t)scanlinesize);
++ }
+ for (i = 0; i < length; i++)
+- dump_buffer(dump->infile, dump->format, 1, scanlinesize,
++ dump_buffer(dump->infile, dump->format, 1, (uint32)scanlinesize,
+ i, read_buff + (i * scanlinesize));
+ }
+ return (0);
+@@ -7277,13 +7299,13 @@ writeSingleSection(TIFF *in, TIFF *out, struct image_data *image,
+ if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
+ TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
+ if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
+- int inknameslen = strlen(inknames) + 1;
++ int inknameslen = (int)strlen(inknames) + 1;
+ const char* cp = inknames;
+ while (ninks > 1) {
+ cp = strchr(cp, '\0');
+ if (cp) {
+ cp++;
+- inknameslen += (strlen(cp) + 1);
++ inknameslen += ((int)strlen(cp) + 1);
+ }
+ ninks--;
+ }
+@@ -7346,23 +7368,23 @@ createImageSection(uint32 sectsize, unsigned char **sect_buff_ptr)
+
+ if (!sect_buff)
+ {
+- sect_buff = (unsigned char *)_TIFFmalloc(sectsize);
++ sect_buff = (unsigned char *)_TIFFmalloc(sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!sect_buff)
+ {
+ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
+ return (-1);
+ }
+- _TIFFmemset(sect_buff, 0, sectsize);
++ _TIFFmemset(sect_buff, 0, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ {
+ if (prev_sectsize < sectsize)
+ {
+- new_buff = _TIFFrealloc(sect_buff, sectsize);
++ new_buff = _TIFFrealloc(sect_buff, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ free (sect_buff);
+- sect_buff = (unsigned char *)_TIFFmalloc(sectsize);
++ sect_buff = (unsigned char *)_TIFFmalloc(sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ sect_buff = new_buff;
+@@ -7372,7 +7394,7 @@ createImageSection(uint32 sectsize, unsigned char **sect_buff_ptr)
+ TIFFError("createImageSection", "Unable to allocate/reallocate section buffer");
+ return (-1);
+ }
+- _TIFFmemset(sect_buff, 0, sectsize);
++ _TIFFmemset(sect_buff, 0, sectsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ }
+
+@@ -7403,17 +7425,17 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ cropsize = crop->bufftotal;
+ crop_buff = seg_buffs[0].buffer;
+ if (!crop_buff)
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+ prev_cropsize = seg_buffs[0].size;
+ if (prev_cropsize < cropsize)
+ {
+- next_buff = _TIFFrealloc(crop_buff, cropsize);
++ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (! next_buff)
+ {
+ _TIFFfree (crop_buff);
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = next_buff;
+@@ -7426,7 +7448,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ return (-1);
+ }
+
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ seg_buffs[0].buffer = crop_buff;
+ seg_buffs[0].size = cropsize;
+
+@@ -7505,17 +7527,17 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ cropsize = crop->bufftotal;
+ crop_buff = seg_buffs[i].buffer;
+ if (!crop_buff)
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+ prev_cropsize = seg_buffs[0].size;
+ if (prev_cropsize < cropsize)
+ {
+- next_buff = _TIFFrealloc(crop_buff, cropsize);
++ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (! next_buff)
+ {
+ _TIFFfree (crop_buff);
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = next_buff;
+@@ -7528,7 +7550,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ return (-1);
+ }
+
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ seg_buffs[i].buffer = crop_buff;
+ seg_buffs[i].size = cropsize;
+
+@@ -7641,24 +7663,24 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ crop_buff = *crop_buff_ptr;
+ if (!crop_buff)
+ {
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!crop_buff)
+ {
+ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
+ return (-1);
+ }
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ prev_cropsize = cropsize;
+ }
+ else
+ {
+ if (prev_cropsize < cropsize)
+ {
+- new_buff = _TIFFrealloc(crop_buff, cropsize);
++ new_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!new_buff)
+ {
+ free (crop_buff);
+- crop_buff = (unsigned char *)_TIFFmalloc(cropsize);
++ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ else
+ crop_buff = new_buff;
+@@ -7667,7 +7689,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ TIFFError("createCroppedImage", "Unable to allocate/reallocate crop buffer");
+ return (-1);
+ }
+- _TIFFmemset(crop_buff, 0, cropsize);
++ _TIFFmemset(crop_buff, 0, cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ }
+ }
+
+@@ -7965,13 +7987,13 @@ writeCroppedImage(TIFF *in, TIFF *out, struct image_data *image,
+ if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
+ TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
+ if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
+- int inknameslen = strlen(inknames) + 1;
++ int inknameslen = (int)strlen(inknames) + 1;
+ const char* cp = inknames;
+ while (ninks > 1) {
+ cp = strchr(cp, '\0');
+ if (cp) {
+ cp++;
+- inknameslen += (strlen(cp) + 1);
++ inknameslen += ((int)strlen(cp) + 1);
+ }
+ ninks--;
+ }
+@@ -8356,13 +8378,13 @@ rotateContigSamples32bits(uint16 rotation, uint16 spp, uint16 bps, uint32 width,
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -8431,12 +8453,13 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+ return (-1);
+ }
+
+- if (!(rbuff = (unsigned char *)_TIFFmalloc(buffsize)))
++ /* Add 3 padding bytes for extractContigSamplesShifted32bits */
++ if (!(rbuff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+- TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize);
++ TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
+- _TIFFmemset(rbuff, '\0', buffsize);
++ _TIFFmemset(rbuff, '\0', buffsize + NUM_BUFF_OVERSIZE_BYTES);
+
+ ibuff = *ibuff_ptr;
+ switch (rotation)
+@@ -8964,13 +8987,13 @@ reverseSamples32bits (uint16 spp, uint16 bps, uint32 width,
+ }
+ else /* If we have a full buffer's worth, write it out */
+ {
+- bytebuff1 = (buff2 >> 56);
++ bytebuff1 = (uint8)(buff2 >> 56);
+ *dst++ = bytebuff1;
+- bytebuff2 = (buff2 >> 48);
++ bytebuff2 = (uint8)(buff2 >> 48);
+ *dst++ = bytebuff2;
+- bytebuff3 = (buff2 >> 40);
++ bytebuff3 = (uint8)(buff2 >> 40);
+ *dst++ = bytebuff3;
+- bytebuff4 = (buff2 >> 32);
++ bytebuff4 = (uint8)(buff2 >> 32);
+ *dst++ = bytebuff4;
+ ready_bits -= 32;
+
+@@ -9061,12 +9084,13 @@ mirrorImage(uint16 spp, uint16 bps, uint16 mirror, uint32 width, uint32 length,
+ {
+ case MIRROR_BOTH:
+ case MIRROR_VERT:
+- line_buff = (unsigned char *)_TIFFmalloc(rowsize);
++ line_buff = (unsigned char *)_TIFFmalloc(rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (line_buff == NULL)
+ {
+- TIFFError ("mirrorImage", "Unable to allocate mirror line buffer of %1u bytes", rowsize);
++ TIFFError ("mirrorImage", "Unable to allocate mirror line buffer of %1u bytes", rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
++ _TIFFmemset(line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+
+ dst = ibuff + (rowsize * (length - 1));
+ for (row = 0; row < length / 2; row++)
+@@ -9098,11 +9122,12 @@ mirrorImage(uint16 spp, uint16 bps, uint16 mirror, uint32 width, uint32 length,
+ }
+ else
+ { /* non 8 bit per sample data */
+- if (!(line_buff = (unsigned char *)_TIFFmalloc(rowsize + 1)))
++ if (!(line_buff = (unsigned char *)_TIFFmalloc(rowsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+ TIFFError("mirrorImage", "Unable to allocate mirror line buffer");
+ return (-1);
+ }
++ _TIFFmemset(line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ bytes_per_sample = (bps + 7) / 8;
+ bytes_per_pixel = ((bps * spp) + 7) / 8;
+ if (bytes_per_pixel < (bytes_per_sample + 1))
+@@ -9114,7 +9139,7 @@ mirrorImage(uint16 spp, uint16 bps, uint16 mirror, uint32 width, uint32 length,
+ {
+ row_offset = row * rowsize;
+ src = ibuff + row_offset;
+- _TIFFmemset (line_buff, '\0', rowsize);
++ _TIFFmemset (line_buff, '\0', rowsize + NUM_BUFF_OVERSIZE_BYTES);
+ switch (shift_width)
+ {
+ case 1: if (reverseSamples16bits(spp, bps, width, src, line_buff))
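A recurring change in the hunks above is that every _TIFFmalloc()/_TIFFrealloc() call gains a few bytes of slack (NUM_BUFF_OVERSIZE_BYTES; the comment near rotateImage() indicates three padding bytes) and the padding is zeroed. The standalone sketch below is illustrative only and not part of the patch; PAD_BYTES and load32() are hypothetical names. It shows the kind of access the padding guards against: a helper that always loads 32 bits can touch up to three bytes past the nominal end of a row when it starts at the last data byte.

/*
 * Illustrative sketch only: why the allocations above are padded and zeroed.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAD_BYTES 3 /* stands in for NUM_BUFF_OVERSIZE_BYTES */

/* A reader that always loads 32 bits, as the shifted-sample extractors do. */
static uint32_t load32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
    size_t rowsize = 13; /* e.g. a row of packed, non-byte-aligned samples */
    unsigned char *row = malloc(rowsize + PAD_BYTES);

    if (row == NULL)
        return 1;
    memset(row, 0, rowsize + PAD_BYTES); /* padding zeroed, as in the patch */
    /* Starting at the last data byte touches 3 bytes past rowsize; with an
     * exact-size buffer this would be a heap over-read. */
    printf("%u\n", (unsigned)load32(row + rowsize - 1));
    free(row);
    return 0;
}

The same reasoning applies to the rotate and mirror line buffers above, whose shifted 16-, 24- and 32-bit extractors can read a few bytes beyond the last packed sample.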
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-3597_3626_3627.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-3597_3626_3627.patch
new file mode 100644
index 0000000000..18a4b4e0ff
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-3597_3626_3627.patch
@@ -0,0 +1,123 @@
+From f7c06c395daf1b2c52ab431e00db2d9fc2ac993e Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Tue, 10 May 2022 20:03:17 +0000
+Subject: [PATCH] tiffcrop: Fix issue #330 and some more from 320 to 349
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2022-3597 CVE-2022-3626 CVE-2022-3627
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/e319508023580e2f70e6e626f745b5b2a1707313
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/8fe3735942ea1d90d8cef843b55b3efe8ab6feaf
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/bad48e90b410df32172006c7876da449ba62cdba
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/236b7191f04c60d09ee836ae13b50f812c841047
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-01-17
+
+---
+ tools/tiffcrop.c | 50 ++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 42 insertions(+), 8 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index c923920..a0789a3 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -103,7 +103,12 @@
+ * selects which functions dump data, with higher numbers selecting
+ * lower level, scanline level routines. Debug reports a limited set
+ * of messages to monitor progess without enabling dump logs.
+- */
++ *
++ * Note 1: The (-X|-Y), -Z, -z and -S options are mutually exclusive.
++ * In no case should the options be applied to a given selection successively.
++ * Note 2: Any of the -X, -Y, -Z and -z options together with other PAGE_MODE_x options
++ * such as -H, -V, -P, -J or -K are not supported and may cause buffer overflows.
++ */
+
+ static char tiffcrop_version_id[] = "2.4.1";
+ static char tiffcrop_rev_date[] = "03-03-2010";
+@@ -176,12 +181,12 @@ extern int getopt(int argc, char * const argv[], const char *optstring);
+ #define ROTATECW_270 32
+ #define ROTATE_ANY (ROTATECW_90 | ROTATECW_180 | ROTATECW_270)
+
+-#define CROP_NONE 0
+-#define CROP_MARGINS 1
+-#define CROP_WIDTH 2
+-#define CROP_LENGTH 4
+-#define CROP_ZONES 8
+-#define CROP_REGIONS 16
++#define CROP_NONE 0 /* "-S" -> Page_MODE_ROWSCOLS and page->rows/->cols != 0 */
++#define CROP_MARGINS 1 /* "-m" */
++#define CROP_WIDTH 2 /* "-X" */
++#define CROP_LENGTH 4 /* "-Y" */
++#define CROP_ZONES 8 /* "-Z" */
++#define CROP_REGIONS 16 /* "-z" */
+ #define CROP_ROTATE 32
+ #define CROP_MIRROR 64
+ #define CROP_INVERT 128
+@@ -323,7 +328,7 @@ struct crop_mask {
+ #define PAGE_MODE_RESOLUTION 1
+ #define PAGE_MODE_PAPERSIZE 2
+ #define PAGE_MODE_MARGINS 4
+-#define PAGE_MODE_ROWSCOLS 8
++#define PAGE_MODE_ROWSCOLS 8 /* for -S option */
+
+ #define INVERT_DATA_ONLY 10
+ #define INVERT_DATA_AND_TAG 11
+@@ -754,6 +759,12 @@ static char* usage_info[] = {
+ " The four debug/dump options are independent, though it makes little sense to",
+ " specify a dump file without specifying a detail level.",
+ " ",
++"Note 1: The (-X|-Y), -Z, -z and -S options are mutually exclusive.",
++" In no case should the options be applied to a given selection successively.",
++" ",
++"Note 2: Any of the -X, -Y, -Z and -z options together with other PAGE_MODE_x options",
++" such as - H, -V, -P, -J or -K are not supported and may cause buffer overflows.",
++" ",
+ NULL
+ };
+
+@@ -2112,6 +2123,27 @@ void process_command_opts (int argc, char *argv[], char *mp, char *mode, uint32
+ /*NOTREACHED*/
+ }
+ }
++ /*-- Check for not allowed combinations (e.g. -X, -Y and -Z, -z and -S are mutually exclusive) --*/
++ char XY, Z, R, S;
++ XY = ((crop_data->crop_mode & CROP_WIDTH) || (crop_data->crop_mode & CROP_LENGTH)) ? 1 : 0;
++ Z = (crop_data->crop_mode & CROP_ZONES) ? 1 : 0;
++ R = (crop_data->crop_mode & CROP_REGIONS) ? 1 : 0;
++ S = (page->mode & PAGE_MODE_ROWSCOLS) ? 1 : 0;
++ if (XY + Z + R + S > 1) {
++ TIFFError("tiffcrop input error", "The crop options(-X|-Y), -Z, -z and -S are mutually exclusive.->exit");
++ exit(EXIT_FAILURE);
++ }
++
++ /* Check for not allowed combination:
++ * Any of the -X, -Y, -Z and -z options together with other PAGE_MODE_x options
++ * such as -H, -V, -P, -J or -K are not supported and may cause buffer overflows.
++. */
++ if ((XY + Z + R > 0) && page->mode != PAGE_MODE_NONE) {
++ TIFFError("tiffcrop input error",
++ "Any of the crop options -X, -Y, -Z and -z together with other PAGE_MODE_x options such as - H, -V, -P, -J or -K is not supported and may cause buffer overflows..->exit");
++ exit(EXIT_FAILURE);
++ }
++
+ } /* end process_command_opts */
+
+ /* Start a new output file if one has not been previously opened or
+@@ -2384,6 +2416,7 @@ main(int argc, char* argv[])
+ exit (-1);
+ }
+
++ /* Crop input image and copy zones and regions from input image into seg_buffs or crop_buff. */
+ if (crop.selections > 0)
+ {
+ if (processCropSelections(&image, &crop, &read_buff, seg_buffs))
+@@ -2400,6 +2433,7 @@ main(int argc, char* argv[])
+ exit (-1);
+ }
+ }
++ /* Format and write selected image parts to output file(s). */
+ if (page.mode == PAGE_MODE_NONE)
+ { /* Whole image or sections not based on output page size */
+ if (crop.selections > 0)
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-3599.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-3599.patch
new file mode 100644
index 0000000000..b3232d9002
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-3599.patch
@@ -0,0 +1,277 @@
+From 01bca7e6f608da7696949fca6acda78b9935ba19 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Tue, 30 Aug 2022 16:56:48 +0200
+Subject: [PATCH] Revised handling of TIFFTAG_INKNAMES and related
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2022-3599 CVE-2022-4645 CVE-2023-30774
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/e813112545942107551433d61afd16ac094ff246
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-01-17
+
+ TIFFTAG_NUMBEROFINKS value
+
+In order to solve the buffer overflow issues related to TIFFTAG_INKNAMES and the associated TIFFTAG_NUMBEROFINKS value, a revised handling of those tags within LibTiff is proposed:
+
+Behaviour for writing:
+ `NumberOfInks` MUST match the number of inks in the `InkNames` string.
+ `NumberOfInks` is automatically set when `InkNames` is set.
+ If `NumberOfInks` differs from the number of inks within the `InkNames` string, it will be corrected and a warning is issued.
+ If `NumberOfInks` is not equal to samplesperpixel, only a warning will be issued.
+
+Behaviour for reading:
+ When reading `InkNames` from a TIFF file, `NumberOfInks` will be set automatically to the number of inks in the `InkNames` string.
+ If `NumberOfInks` differs from the number of inks within the `InkNames` string, it will be corrected and a warning is issued.
+ If `NumberOfInks` is not equal to samplesperpixel, only a warning will be issued.
+
+This allows the safe use of the NumberOfInks value to read out the InkNames without a buffer overflow (a usage sketch follows this patch).
+
+This MR will close the following issues: #149, #150, #152, #168 (to be checked), #250, #269, #398 and #456.
+
+It also fixes the old bug at http://bugzilla.maptools.org/show_bug.cgi?id=2599, for which the limitation of `NumberOfInks = SPP` was introduced; in my opinion that limitation is not necessary and does not solve the general issue.
+
+---
+ libtiff/tif_dir.c | 120 ++++++++++++++++++++++++-----------------
+ libtiff/tif_dir.h | 2 +
+ libtiff/tif_dirinfo.c | 2 +-
+ libtiff/tif_dirwrite.c | 5 ++
+ libtiff/tif_print.c | 4 ++
+ 5 files changed, 83 insertions(+), 50 deletions(-)
+
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 39aeeb4..9d8267a 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -29,6 +29,7 @@
+ * (and also some miscellaneous stuff)
+ */
+ #include "tiffiop.h"
++# include <inttypes.h>
+
+ /*
+ * These are used in the backwards compatibility code...
+@@ -137,32 +138,30 @@ setExtraSamples(TIFF* tif, va_list ap, uint32* v)
+ }
+
+ /*
+- * Confirm we have "samplesperpixel" ink names separated by \0. Returns
++ * Count ink names separated by \0. Returns
+ * zero if the ink names are not as expected.
+ */
+-static uint32
+-checkInkNamesString(TIFF* tif, uint32 slen, const char* s)
++static uint16
++countInkNamesString(TIFF *tif, uint32 slen, const char *s)
+ {
+- TIFFDirectory* td = &tif->tif_dir;
+- uint16 i = td->td_samplesperpixel;
++ uint16 i = 0;
++ const char *ep = s + slen;
++ const char *cp = s;
+
+ if (slen > 0) {
+- const char* ep = s+slen;
+- const char* cp = s;
+- for (; i > 0; i--) {
++ do {
+ for (; cp < ep && *cp != '\0'; cp++) {}
+ if (cp >= ep)
+ goto bad;
+ cp++; /* skip \0 */
+- }
+- return ((uint32)(cp-s));
++ i++;
++ } while (cp < ep);
++ return (i);
+ }
+ bad:
+ TIFFErrorExt(tif->tif_clientdata, "TIFFSetField",
+- "%s: Invalid InkNames value; expecting %d names, found %d",
+- tif->tif_name,
+- td->td_samplesperpixel,
+- td->td_samplesperpixel-i);
++ "%s: Invalid InkNames value; no NUL at given buffer end location %"PRIu32", after %"PRIu16" ink",
++ tif->tif_name, slen, i);
+ return (0);
+ }
+
+@@ -476,13 +475,61 @@ _TIFFVSetField(TIFF* tif, uint32 tag, va_list ap)
+ _TIFFsetFloatArray(&td->td_refblackwhite, va_arg(ap, float*), 6);
+ break;
+ case TIFFTAG_INKNAMES:
+- v = (uint16) va_arg(ap, uint16_vap);
+- s = va_arg(ap, char*);
+- v = checkInkNamesString(tif, v, s);
+- status = v > 0;
+- if( v > 0 ) {
+- _TIFFsetNString(&td->td_inknames, s, v);
+- td->td_inknameslen = v;
++ {
++ v = (uint16) va_arg(ap, uint16_vap);
++ s = va_arg(ap, char*);
++ uint16 ninksinstring;
++ ninksinstring = countInkNamesString(tif, v, s);
++ status = ninksinstring > 0;
++ if(ninksinstring > 0 ) {
++ _TIFFsetNString(&td->td_inknames, s, v);
++ td->td_inknameslen = v;
++ /* Set NumberOfInks to the value ninksinstring */
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS))
++ {
++ if (td->td_numberofinks != ninksinstring) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu16" of NumberOfInks is different from the number of inks %"PRIu16".\n -> NumberOfInks value adapted to %"PRIu16"",
++ tif->tif_name, fip->field_name, td->td_numberofinks, ninksinstring, ninksinstring);
++ td->td_numberofinks = ninksinstring;
++ }
++ } else {
++ td->td_numberofinks = ninksinstring;
++ TIFFSetFieldBit(tif, FIELD_NUMBEROFINKS);
++ }
++ if (TIFFFieldSet(tif, FIELD_SAMPLESPERPIXEL))
++ {
++ if (td->td_numberofinks != td->td_samplesperpixel) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu16" of NumberOfInks is different from the SamplesPerPixel value %"PRIu16"",
++ tif->tif_name, fip->field_name, td->td_numberofinks, td->td_samplesperpixel);
++ }
++ }
++ }
++ }
++ break;
++ case TIFFTAG_NUMBEROFINKS:
++ v = (uint16)va_arg(ap, uint16_vap);
++ /* If InkNames already set also NumberOfInks is set accordingly and should be equal */
++ if (TIFFFieldSet(tif, FIELD_INKNAMES))
++ {
++ if (v != td->td_numberofinks) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Error %s; Tag %s:\n It is not possible to set the value %"PRIu32" for NumberOfInks\n which is different from the number of inks in the InkNames tag (%"PRIu16")",
++ tif->tif_name, fip->field_name, v, td->td_numberofinks);
++ /* Do not set / overwrite number of inks already set by InkNames case accordingly. */
++ status = 0;
++ }
++ } else {
++ td->td_numberofinks = (uint16)v;
++ if (TIFFFieldSet(tif, FIELD_SAMPLESPERPIXEL))
++ {
++ if (td->td_numberofinks != td->td_samplesperpixel) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Warning %s; Tag %s:\n Value %"PRIu32" of NumberOfInks is different from the SamplesPerPixel value %"PRIu16"",
++ tif->tif_name, fip->field_name, v, td->td_samplesperpixel);
++ }
++ }
+ }
+ break;
+ case TIFFTAG_PERSAMPLE:
+@@ -887,34 +934,6 @@ _TIFFVGetField(TIFF* tif, uint32 tag, va_list ap)
+ if (fip->field_bit == FIELD_CUSTOM) {
+ standard_tag = 0;
+ }
+-
+- if( standard_tag == TIFFTAG_NUMBEROFINKS )
+- {
+- int i;
+- for (i = 0; i < td->td_customValueCount; i++) {
+- uint16 val;
+- TIFFTagValue *tv = td->td_customValues + i;
+- if (tv->info->field_tag != standard_tag)
+- continue;
+- if( tv->value == NULL )
+- return 0;
+- val = *(uint16 *)tv->value;
+- /* Truncate to SamplesPerPixel, since the */
+- /* setting code for INKNAMES assume that there are SamplesPerPixel */
+- /* inknames. */
+- /* Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2599 */
+- if( val > td->td_samplesperpixel )
+- {
+- TIFFWarningExt(tif->tif_clientdata,"_TIFFVGetField",
+- "Truncating NumberOfInks from %u to %u",
+- val, td->td_samplesperpixel);
+- val = td->td_samplesperpixel;
+- }
+- *va_arg(ap, uint16*) = val;
+- return 1;
+- }
+- return 0;
+- }
+
+ switch (standard_tag) {
+ case TIFFTAG_SUBFILETYPE:
+@@ -1092,6 +1111,9 @@ _TIFFVGetField(TIFF* tif, uint32 tag, va_list ap)
+ case TIFFTAG_INKNAMES:
+ *va_arg(ap, char**) = td->td_inknames;
+ break;
++ case TIFFTAG_NUMBEROFINKS:
++ *va_arg(ap, uint16 *) = td->td_numberofinks;
++ break;
+ default:
+ {
+ int i;
+diff --git a/libtiff/tif_dir.h b/libtiff/tif_dir.h
+index e7f0667..7cad679 100644
+--- a/libtiff/tif_dir.h
++++ b/libtiff/tif_dir.h
+@@ -117,6 +117,7 @@ typedef struct {
+ /* CMYK parameters */
+ int td_inknameslen;
+ char* td_inknames;
++ uint16 td_numberofinks; /* number of inks in InkNames string */
+
+ int td_customValueCount;
+ TIFFTagValue *td_customValues;
+@@ -174,6 +175,7 @@ typedef struct {
+ #define FIELD_TRANSFERFUNCTION 44
+ #define FIELD_INKNAMES 46
+ #define FIELD_SUBIFD 49
++#define FIELD_NUMBEROFINKS 50
+ /* FIELD_CUSTOM (see tiffio.h) 65 */
+ /* end of support for well-known tags; codec-private tags follow */
+ #define FIELD_CODEC 66 /* base of codec-private tags */
+diff --git a/libtiff/tif_dirinfo.c b/libtiff/tif_dirinfo.c
+index fbfaaf0..bf7de70 100644
+--- a/libtiff/tif_dirinfo.c
++++ b/libtiff/tif_dirinfo.c
+@@ -104,7 +104,7 @@ tiffFields[] = {
+ { TIFFTAG_SUBIFD, -1, -1, TIFF_IFD8, 0, TIFF_SETGET_C16_IFD8, TIFF_SETGET_UNDEFINED, FIELD_SUBIFD, 1, 1, "SubIFD", (TIFFFieldArray*) &tiffFieldArray },
+ { TIFFTAG_INKSET, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 0, 0, "InkSet", NULL },
+ { TIFFTAG_INKNAMES, -1, -1, TIFF_ASCII, 0, TIFF_SETGET_C16_ASCII, TIFF_SETGET_UNDEFINED, FIELD_INKNAMES, 1, 1, "InkNames", NULL },
+- { TIFFTAG_NUMBEROFINKS, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 1, 0, "NumberOfInks", NULL },
++ { TIFFTAG_NUMBEROFINKS, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UNDEFINED, FIELD_NUMBEROFINKS, 1, 0, "NumberOfInks", NULL },
+ { TIFFTAG_DOTRANGE, 2, 2, TIFF_SHORT, 0, TIFF_SETGET_UINT16_PAIR, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 0, 0, "DotRange", NULL },
+ { TIFFTAG_TARGETPRINTER, -1, -1, TIFF_ASCII, 0, TIFF_SETGET_ASCII, TIFF_SETGET_UNDEFINED, FIELD_CUSTOM, 1, 0, "TargetPrinter", NULL },
+ { TIFFTAG_EXTRASAMPLES, -1, -1, TIFF_SHORT, 0, TIFF_SETGET_C16_UINT16, TIFF_SETGET_UNDEFINED, FIELD_EXTRASAMPLES, 0, 1, "ExtraSamples", NULL },
+diff --git a/libtiff/tif_dirwrite.c b/libtiff/tif_dirwrite.c
+index 9e4d306..a2dbc3b 100644
+--- a/libtiff/tif_dirwrite.c
++++ b/libtiff/tif_dirwrite.c
+@@ -677,6 +677,11 @@ TIFFWriteDirectorySec(TIFF* tif, int isimage, int imagedone, uint64* pdiroff)
+ if (!TIFFWriteDirectoryTagAscii(tif,&ndir,dir,TIFFTAG_INKNAMES,tif->tif_dir.td_inknameslen,tif->tif_dir.td_inknames))
+ goto bad;
+ }
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS))
++ {
++ if (!TIFFWriteDirectoryTagShort(tif, &ndir, dir, TIFFTAG_NUMBEROFINKS, tif->tif_dir.td_numberofinks))
++ goto bad;
++ }
+ if (TIFFFieldSet(tif,FIELD_SUBIFD))
+ {
+ if (!TIFFWriteDirectoryTagSubifd(tif,&ndir,dir))
+diff --git a/libtiff/tif_print.c b/libtiff/tif_print.c
+index a073794..a9f05a7 100644
+--- a/libtiff/tif_print.c
++++ b/libtiff/tif_print.c
+@@ -402,6 +402,10 @@ TIFFPrintDirectory(TIFF* tif, FILE* fd, long flags)
+ }
+ fputs("\n", fd);
+ }
++ if (TIFFFieldSet(tif, FIELD_NUMBEROFINKS)) {
++ fprintf(fd, " NumberOfInks: %d\n",
++ td->td_numberofinks);
++ }
+ if (TIFFFieldSet(tif,FIELD_THRESHHOLDING)) {
+ fprintf(fd, " Thresholding: ");
+ switch (td->td_threshholding) {
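The behaviour described in the patch header above can be exercised with ordinary libtiff API calls. The following sketch is illustrative only and is not part of the patch; it assumes a libtiff 4.1.0 build with this backport applied, and the file name and ink values are arbitrary examples. It writes a minimal CMYK file, sets InkNames, and reads back the NumberOfInks value that the patched TIFFSetField() derives from the ink-name string.

/*
 * Usage sketch: InkNames sets NumberOfInks automatically with this backport.
 */
#include <stdio.h>
#include <tiffio.h>

int main(void)
{
    /* Four NUL-terminated ink names, concatenated as the TIFF spec expects. */
    static const char inks[] = "Cyan\0Magenta\0Yellow\0Black\0";
    unsigned char pixel[4] = { 0, 0, 0, 255 }; /* one CMYK pixel */
    uint16 ninks = 0;
    TIFF *tif = TIFFOpen("inks-example.tif", "w");

    if (tif == NULL)
        return 1;
    TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, 1);
    TIFFSetField(tif, TIFFTAG_IMAGELENGTH, 1);
    TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
    TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 4);
    TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_SEPARATED);
    TIFFSetField(tif, TIFFTAG_INKSET, INKSET_CMYK);
    TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP, 1);
    /* Length covers all four names including their terminating NULs. */
    TIFFSetField(tif, TIFFTAG_INKNAMES, (uint16)(sizeof(inks) - 1), inks);
    /* With the patch, NumberOfInks was derived from the InkNames string. */
    TIFFGetField(tif, TIFFTAG_NUMBEROFINKS, &ninks);
    printf("NumberOfInks = %d\n", (int)ninks); /* prints 4 */
    TIFFWriteScanline(tif, pixel, 0, 0);
    TIFFClose(tif);
    return 0;
}

Before the patch, the same TIFFGetField() went through the custom-tag path removed above, which truncated the value to SamplesPerPixel.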
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-3970.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-3970.patch
new file mode 100644
index 0000000000..ea70827cbe
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-3970.patch
@@ -0,0 +1,45 @@
+From 7e87352217d1f0c77eee7033ac59e3aab08532bb Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 8 Nov 2022 15:16:58 +0100
+Subject: [PATCH] TIFFReadRGBATileExt(): fix (unsigned) integer overflow on
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2022-3970
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/227500897dfb07fb7d27f7aa570050e62617e3be
+Reviewed-by: Sylvain Beucler <beuc@debian.org>
+Last-Update: 2023-01-17
+
+ strips/tiles > 2 GB
+
+Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=53137
+
+---
+ libtiff/tif_getimage.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/libtiff/tif_getimage.c b/libtiff/tif_getimage.c
+index 96ab146..0b90dcc 100644
+--- a/libtiff/tif_getimage.c
++++ b/libtiff/tif_getimage.c
+@@ -3042,15 +3042,15 @@ TIFFReadRGBATileExt(TIFF* tif, uint32 col, uint32 row, uint32 * raster, int stop
+ return( ok );
+
+ for( i_row = 0; i_row < read_ysize; i_row++ ) {
+- memmove( raster + (tile_ysize - i_row - 1) * tile_xsize,
+- raster + (read_ysize - i_row - 1) * read_xsize,
++ memmove( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize,
++ raster + (size_t)(read_ysize - i_row - 1) * read_xsize,
+ read_xsize * sizeof(uint32) );
+- _TIFFmemset( raster + (tile_ysize - i_row - 1) * tile_xsize+read_xsize,
++ _TIFFmemset( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize+read_xsize,
+ 0, sizeof(uint32) * (tile_xsize - read_xsize) );
+ }
+
+ for( i_row = read_ysize; i_row < tile_ysize; i_row++ ) {
+- _TIFFmemset( raster + (tile_ysize - i_row - 1) * tile_xsize,
++ _TIFFmemset( raster + (size_t)(tile_ysize - i_row - 1) * tile_xsize,
+ 0, sizeof(uint32) * tile_xsize );
+ }
+
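The fix above widens one operand to size_t before the row offset is multiplied out. As a standalone illustration, not libtiff code, with values chosen only to force the 32-bit wrap, the difference looks like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t tile_ysize = 100000, tile_xsize = 100000, i_row = 0;

    /* Product computed in 32 bits first, then widened: wraps modulo 2^32. */
    size_t wrapped = (tile_ysize - i_row - 1) * tile_xsize;
    /* One operand widened first, as in the fix: full 64-bit result on LP64. */
    size_t widened = (size_t)(tile_ysize - i_row - 1) * tile_xsize;

    printf("wrapped = %zu\n", wrapped); /* 1409965408 */
    printf("widened = %zu\n", widened); /* 9999900000 with 64-bit size_t */
    return 0;
}

The wrapped product makes the memmove()/_TIFFmemset() calls above address the wrong part of the raster, which is the corruption the CVE describes.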
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-40090.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-40090.patch
new file mode 100644
index 0000000000..0a88f59553
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-40090.patch
@@ -0,0 +1,548 @@
+From d385738335deb0c4bb70449f12e411f2203c0d01 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 2 Sep 2022 21:20:28 +0200
+Subject: [PATCH 1/4] Improved IFD-Loop Handling (fixes #455)
+
+Basic approach:
+- The order in the entire chain must be checked, and not only whether an offset has already been read once.
+- To do this, pairs of directory number and offset are stored and checked.
+- The offset of a directory number can change.
+- TIFFAdvanceDirectory() must also perform an IFD loop check.
+- TIFFCheckDirOffset() is replaced by _TIFFCheckDirNumberAndOffset().
+
+Rules for the check:
+- If an offset is already in the list, it must have the same IFD number. Otherwise it is an IFD loop.
+- If the offset is not in the list and the IFD number is greater than the number of list entries, a new list entry is added.
+- Otherwise, the offset stored for that IFD number is updated (a standalone sketch of these rules follows this patch).
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2022-40090.patch?h=ubuntu/focal-security
+Upstream commit
+https://gitlab.com/libtiff/libtiff/-/commit/c7caec9a4d8f24c17e667480d2c7d0d51c9fae41]
+CVE: CVE-2022-40090
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_close.c | 6 ++-
+ libtiff/tif_dir.c | 91 +++++++++++++++++++++++++----------------
+ libtiff/tif_dir.h | 1 +
+ libtiff/tif_dirread.c | 94 ++++++++++++++++++++++++++++++-------------
+ libtiff/tif_open.c | 3 +-
+ libtiff/tiffiop.h | 3 +-
+ 6 files changed, 131 insertions(+), 67 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_close.c
++++ tiff-4.1.0+git191117/libtiff/tif_close.c
+@@ -52,8 +52,10 @@ TIFFCleanup(TIFF* tif)
+ (*tif->tif_cleanup)(tif);
+ TIFFFreeDirectory(tif);
+
+- if (tif->tif_dirlist)
+- _TIFFfree(tif->tif_dirlist);
++ if (tif->tif_dirlistoff)
++ _TIFFfree(tif->tif_dirlistoff);
++ if (tif->tif_dirlistdirn)
++ _TIFFfree(tif->tif_dirlistdirn);
+
+ /*
+ * Clean up client info links.
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dir.c
++++ tiff-4.1.0+git191117/libtiff/tif_dir.c
+@@ -1463,12 +1463,22 @@ TIFFDefaultDirectory(TIFF* tif)
+ }
+
+ static int
+-TIFFAdvanceDirectory(TIFF* tif, uint64* nextdir, uint64* off)
++TIFFAdvanceDirectory(TIFF* tif, uint64* nextdiroff, uint64* off, uint16* nextdirnum)
+ {
+ static const char module[] = "TIFFAdvanceDirectory";
++
++ /* Add this directory to the directory list, if not already in. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, *nextdirnum, *nextdiroff)) {
++ TIFFErrorExt(tif->tif_clientdata, module, "Starting directory %hu at offset 0x%lx (%lu) might cause an IFD loop",
++ *nextdirnum, *nextdiroff, *nextdiroff);
++ *nextdiroff = 0;
++ *nextdirnum = 0;
++ return(0);
++ }
++
+ if (isMapped(tif))
+ {
+- uint64 poff=*nextdir;
++ uint64 poff=*nextdiroff;
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+ {
+ tmsize_t poffa,poffb,poffc,poffd;
+@@ -1479,7 +1489,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ if (((uint64)poffa!=poff)||(poffb<poffa)||(poffb<(tmsize_t)sizeof(uint16))||(poffb>tif->tif_size))
+ {
+ TIFFErrorExt(tif->tif_clientdata,module,"Error fetching directory count");
+- *nextdir=0;
++ *nextdiroff=0;
+ return(0);
+ }
+ _TIFFmemcpy(&dircount,tif->tif_base+poffa,sizeof(uint16));
+@@ -1497,7 +1507,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ _TIFFmemcpy(&nextdir32,tif->tif_base+poffc,sizeof(uint32));
+ if (tif->tif_flags&TIFF_SWAB)
+ TIFFSwabLong(&nextdir32);
+- *nextdir=nextdir32;
++ *nextdiroff=nextdir32;
+ }
+ else
+ {
+@@ -1529,11 +1539,10 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ }
+ if (off!=NULL)
+ *off=(uint64)poffc;
+- _TIFFmemcpy(nextdir,tif->tif_base+poffc,sizeof(uint64));
++ _TIFFmemcpy(nextdiroff,tif->tif_base+poffc,sizeof(uint64));
+ if (tif->tif_flags&TIFF_SWAB)
+- TIFFSwabLong8(nextdir);
++ TIFFSwabLong8(nextdiroff);
+ }
+- return(1);
+ }
+ else
+ {
+@@ -1541,7 +1550,7 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ {
+ uint16 dircount;
+ uint32 nextdir32;
+- if (!SeekOK(tif, *nextdir) ||
++ if (!SeekOK(tif, *nextdiroff) ||
+ !ReadOK(tif, &dircount, sizeof (uint16))) {
+ TIFFErrorExt(tif->tif_clientdata, module, "%s: Error fetching directory count",
+ tif->tif_name);
+@@ -1562,13 +1571,13 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ }
+ if (tif->tif_flags & TIFF_SWAB)
+ TIFFSwabLong(&nextdir32);
+- *nextdir=nextdir32;
++ *nextdiroff=nextdir32;
+ }
+ else
+ {
+ uint64 dircount64;
+ uint16 dircount16;
+- if (!SeekOK(tif, *nextdir) ||
++ if (!SeekOK(tif, *nextdiroff) ||
+ !ReadOK(tif, &dircount64, sizeof (uint64))) {
+ TIFFErrorExt(tif->tif_clientdata, module, "%s: Error fetching directory count",
+ tif->tif_name);
+@@ -1588,17 +1597,27 @@ TIFFAdvanceDirectory(TIFF* tif, uint64*
+ else
+ (void) TIFFSeekFile(tif,
+ dircount16*20, SEEK_CUR);
+- if (!ReadOK(tif, nextdir, sizeof (uint64))) {
++ if (!ReadOK(tif, nextdiroff, sizeof (uint64))) {
+ TIFFErrorExt(tif->tif_clientdata, module,
+ "%s: Error fetching directory link",
+ tif->tif_name);
+ return (0);
+ }
+ if (tif->tif_flags & TIFF_SWAB)
+- TIFFSwabLong8(nextdir);
++ TIFFSwabLong8(nextdiroff);
+ }
+- return (1);
+ }
++ if (*nextdiroff != 0) {
++ (*nextdirnum)++;
++ /* Check next directory for IFD looping and if so, set it as last directory. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, *nextdirnum, *nextdiroff)) {
++ TIFFWarningExt(tif->tif_clientdata, module, "the next directory %hu at offset 0x%lx (%lu) might be an IFD loop. Treating directory %hu as last directory",
++ *nextdirnum, *nextdiroff, *nextdiroff, *nextdirnum-1);
++ *nextdiroff = 0;
++ (*nextdirnum)--;
++ }
++ }
++ return (1);
+ }
+
+ /*
+@@ -1608,14 +1627,16 @@ uint16
+ TIFFNumberOfDirectories(TIFF* tif)
+ {
+ static const char module[] = "TIFFNumberOfDirectories";
+- uint64 nextdir;
++ uint64 nextdiroff;
++ uint16 nextdirnum;
+ uint16 n;
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+- nextdir = tif->tif_header.classic.tiff_diroff;
++ nextdiroff = tif->tif_header.classic.tiff_diroff;
+ else
+- nextdir = tif->tif_header.big.tiff_diroff;
++ nextdiroff = tif->tif_header.big.tiff_diroff;
++ nextdirnum = 0;
+ n = 0;
+- while (nextdir != 0 && TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ while (nextdiroff != 0 && TIFFAdvanceDirectory(tif, &nextdiroff, NULL, &nextdirnum))
+ {
+ if (n != 65535) {
+ ++n;
+@@ -1638,28 +1659,30 @@ TIFFNumberOfDirectories(TIFF* tif)
+ int
+ TIFFSetDirectory(TIFF* tif, uint16 dirn)
+ {
+- uint64 nextdir;
++ uint64 nextdiroff;
++ uint16 nextdirnum;
+ uint16 n;
+
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+- nextdir = tif->tif_header.classic.tiff_diroff;
++ nextdiroff = tif->tif_header.classic.tiff_diroff;
+ else
+- nextdir = tif->tif_header.big.tiff_diroff;
+- for (n = dirn; n > 0 && nextdir != 0; n--)
+- if (!TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ nextdiroff = tif->tif_header.big.tiff_diroff;
++ nextdirnum = 0;
++ for (n = dirn; n > 0 && nextdiroff != 0; n--)
++ if (!TIFFAdvanceDirectory(tif, &nextdiroff, NULL, &nextdirnum))
+ return (0);
+- tif->tif_nextdiroff = nextdir;
++ /* If the n-th directory could not be reached (does not exist),
++ * return here without touching anything further. */
++ if (nextdiroff == 0 || n > 0)
++ return (0);
++
++ tif->tif_nextdiroff = nextdiroff;
+ /*
+ * Set curdir to the actual directory index. The
+ * -1 is because TIFFReadDirectory will increment
+ * tif_curdir after successfully reading the directory.
+ */
+ tif->tif_curdir = (dirn - n) - 1;
+- /*
+- * Reset tif_dirnumber counter and start new list of seen directories.
+- * We need this to prevent IFD loops.
+- */
+- tif->tif_dirnumber = 0;
+ return (TIFFReadDirectory(tif));
+ }
+
+@@ -1672,13 +1695,42 @@ TIFFSetDirectory(TIFF* tif, uint16 dirn)
+ int
+ TIFFSetSubDirectory(TIFF* tif, uint64 diroff)
+ {
+- tif->tif_nextdiroff = diroff;
+- /*
+- * Reset tif_dirnumber counter and start new list of seen directories.
+- * We need this to prevent IFD loops.
++ /* Match nextdiroff and curdir for consistent IFD-loop checking.
++ * Only with TIFFSetSubDirectory() the IFD list can be corrupted with invalid offsets
++ * within the main IFD tree.
++ * In the case of several subIFDs of a main image,
++ * there are two possibilities that are not even mutually exclusive.
++ * a.) The subIFD tag contains an array with all offsets of the subIFDs.
++ * b.) The SubIFDs are concatenated with their NextIFD parameters.
++ * (refer to https://www.awaresystems.be/imaging/tiff/specification/TIFFPM6.pdf.)
+ */
+- tif->tif_dirnumber = 0;
+- return (TIFFReadDirectory(tif));
++ int retval;
++ uint16 curdir = 0;
++ int8 probablySubIFD = 0;
++ if (diroff == 0) {
++ /* Special case to invalidate the tif_lastdiroff member. */
++ tif->tif_curdir = 65535;
++ } else {
++ if (!_TIFFGetDirNumberFromOffset(tif, diroff, &curdir)) {
++ /* Non-existing offsets might point to a SubIFD or invalid IFD.*/
++ probablySubIFD = 1;
++ }
++ /* -1 because TIFFReadDirectory() will increment tif_curdir. */
++ tif->tif_curdir = curdir - 1;
++ }
++
++ tif->tif_nextdiroff = diroff;
++ retval = TIFFReadDirectory(tif);
++ /* If failed, curdir was not incremented in TIFFReadDirectory(), so set it back. */
++ if (!retval )tif->tif_curdir++;
++ if (retval && probablySubIFD) {
++ /* Reset IFD list to start new one for SubIFD chain and also start SubIFD chain with tif_curdir=0. */
++ tif->tif_dirnumber = 0;
++ tif->tif_curdir = 0; /* first directory of new chain */
++ /* add this offset to new IFD list */
++ _TIFFCheckDirNumberAndOffset(tif, tif->tif_curdir, diroff);
++ }
++ return (retval);
+ }
+
+ /*
+@@ -1702,12 +1754,15 @@ TIFFLastDirectory(TIFF* tif)
+
+ /*
+ * Unlink the specified directory from the directory chain.
++ * Note: First directory starts with number dirn=1.
++ * This is different to TIFFSetDirectory() where the first directory starts with zero.
+ */
+ int
+ TIFFUnlinkDirectory(TIFF* tif, uint16 dirn)
+ {
+ static const char module[] = "TIFFUnlinkDirectory";
+ uint64 nextdir;
++ uint16 nextdirnum;
+ uint64 off;
+ uint16 n;
+
+@@ -1731,19 +1786,21 @@ TIFFUnlinkDirectory(TIFF* tif, uint16 di
+ nextdir = tif->tif_header.big.tiff_diroff;
+ off = 8;
+ }
++ nextdirnum = 0; /* First directory is dirn=0 */
++
+ for (n = dirn-1; n > 0; n--) {
+ if (nextdir == 0) {
+ TIFFErrorExt(tif->tif_clientdata, module, "Directory %d does not exist", dirn);
+ return (0);
+ }
+- if (!TIFFAdvanceDirectory(tif, &nextdir, &off))
++ if (!TIFFAdvanceDirectory(tif, &nextdir, &off, &nextdirnum))
+ return (0);
+ }
+ /*
+ * Advance to the directory to be unlinked and fetch
+ * the offset of the directory that follows.
+ */
+- if (!TIFFAdvanceDirectory(tif, &nextdir, NULL))
++ if (!TIFFAdvanceDirectory(tif, &nextdir, NULL, &nextdirnum))
+ return (0);
+ /*
+ * Go back and patch the link field of the preceding
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dir.h
++++ tiff-4.1.0+git191117/libtiff/tif_dir.h
+@@ -300,6 +300,8 @@ extern int _TIFFMergeFields(TIFF*, const
+ extern const TIFFField* _TIFFFindOrRegisterField(TIFF *, uint32, TIFFDataType);
+ extern TIFFField* _TIFFCreateAnonField(TIFF *, uint32, TIFFDataType);
+ extern int _TIFFCheckFieldIsValidForCodec(TIFF *tif, ttag_t tag);
++extern int _TIFFCheckDirNumberAndOffset(TIFF *tif, uint16 dirn, uint64 diroff);
++extern int _TIFFGetDirNumberFromOffset(TIFF *tif, uint64 diroff, uint16 *dirn);
+
+ #if defined(__cplusplus)
+ }
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -158,7 +158,6 @@ static void TIFFReadDirectoryFindFieldIn
+
+ static int EstimateStripByteCounts(TIFF* tif, TIFFDirEntry* dir, uint16 dircount);
+ static void MissingRequired(TIFF*, const char*);
+-static int TIFFCheckDirOffset(TIFF* tif, uint64 diroff);
+ static int CheckDirCount(TIFF*, TIFFDirEntry*, uint32);
+ static uint16 TIFFFetchDirectory(TIFF* tif, uint64 diroff, TIFFDirEntry** pdir, uint64* nextdiroff);
+ static int TIFFFetchNormalTag(TIFF*, TIFFDirEntry*, int recover);
+@@ -3584,12 +3583,19 @@ TIFFReadDirectory(TIFF* tif)
+ int bitspersample_read = FALSE;
+ int color_channels;
+
+- tif->tif_diroff=tif->tif_nextdiroff;
+- if (!TIFFCheckDirOffset(tif,tif->tif_nextdiroff))
+- return 0; /* last offset or bad offset (IFD looping) */
+- (*tif->tif_cleanup)(tif); /* cleanup any previous compression state */
+- tif->tif_curdir++;
+- nextdiroff = tif->tif_nextdiroff;
++ if (tif->tif_nextdiroff == 0) {
++ /* In this special case, tif_diroff needs also to be set to 0. */
++ tif->tif_diroff = tif->tif_nextdiroff;
++ return 0; /* last offset, thus no checking necessary */
++ }
++
++ nextdiroff = tif->tif_nextdiroff;
++ /* tif_curdir++ and tif_nextdiroff should only be updated after SUCCESSFUL reading of the directory. Otherwise, invalid IFD offsets could corrupt the IFD list. */
++ if (!_TIFFCheckDirNumberAndOffset(tif, tif->tif_curdir + 1, nextdiroff)) {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Didn't read next directory due to IFD looping at offset 0x%lx (%lu) to offset 0x%lx (%lu)", tif->tif_diroff, tif->tif_diroff, nextdiroff, nextdiroff);
++ return 0; /* bad offset (IFD looping) */
++ }
+ dircount=TIFFFetchDirectory(tif,nextdiroff,&dir,&tif->tif_nextdiroff);
+ if (!dircount)
+ {
+@@ -3597,6 +3603,11 @@ TIFFReadDirectory(TIFF* tif)
+ "Failed to read directory at offset " TIFF_UINT64_FORMAT,nextdiroff);
+ return 0;
+ }
++ /* Set global values after a valid directory has been fetched.
++ * tif_diroff is already set to nextdiroff in TIFFFetchDirectory() in the beginning. */
++ tif->tif_curdir++;
++ (*tif->tif_cleanup)(tif); /* cleanup any previous compression state */
++
+ TIFFReadDirectoryCheckOrder(tif,dir,dircount);
+
+ /*
+@@ -4628,13 +4639,17 @@ MissingRequired(TIFF* tif, const char* t
+ }
+
+ /*
+- * Check the directory offset against the list of already seen directory
+- * offsets. This is a trick to prevent IFD looping. The one can create TIFF
+- * file with looped directory pointers. We will maintain a list of already
+- * seen directories and check every IFD offset against that list.
++ * Check the directory number and offset against the list of already seen
++ * directory numbers and offsets. This is a trick to prevent IFD looping.
++ * One can create a TIFF file with looped directory pointers. We will
++ * maintain a list of already seen directories and check every IFD offset
++ * and its IFD number against that list. However, the offset of an IFD number
++ * can change - e.g. when writing updates to file.
++ * Returns 1 if all is ok; 0 if last directory or IFD loop is encountered,
++ * or an error has occurred.
+ */
+-static int
+-TIFFCheckDirOffset(TIFF* tif, uint64 diroff)
++int
++_TIFFCheckDirNumberAndOffset(TIFF* tif, uint16 dirn, uint64 diroff)
+ {
+ uint16 n;
+
+@@ -4646,35 +4661,64 @@ TIFFCheckDirOffset(TIFF* tif, uint64 dir
+ return 0;
+ }
+
+- for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlist; n++) {
+- if (tif->tif_dirlist[n] == diroff)
+- return 0;
++ /* Check if offset is already in the list:
++ * - yes: check, if offset is at the same IFD number - if not, it is an IFD loop
++ * - no: add to list or update offset at that IFD number
++ */
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistdirn && tif->tif_dirlistoff; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ if (tif->tif_dirlistdirn[n] == dirn) {
++ return 1;
++ } else {
++ TIFFWarningExt(tif->tif_clientdata, "_TIFFCheckDirNumberAndOffset",
++ "TIFF directory %hu has IFD looping to directory %hu at offset 0x%lx (%lu)",
++ dirn-1, tif->tif_dirlistdirn[n], diroff, diroff);
++ return 0;
++ }
++ }
++ }
++ /* Check if offset of an IFD has been changed and update offset of that IFD number. */
++ if (dirn < tif->tif_dirnumber && tif->tif_dirlistdirn && tif->tif_dirlistoff) {
++ /* tif_dirlistdirn can have IFD numbers dirn in random order */
++ for (n = 0; n < tif->tif_dirnumber; n++) {
++ if (tif->tif_dirlistdirn[n] == dirn) {
++ tif->tif_dirlistoff[n] = diroff;
++ return 1;
++ }
++ }
+ }
+
++ /* Add IFD offset and dirn to IFD directory list */
+ tif->tif_dirnumber++;
+
+- if (tif->tif_dirlist == NULL || tif->tif_dirnumber > tif->tif_dirlistsize) {
+- uint64* new_dirlist;
+-
++ if (tif->tif_dirlistoff == NULL || tif->tif_dirlistdirn == NULL || tif->tif_dirnumber > tif->tif_dirlistsize) {
++ uint64 *new_dirlist;
+ /*
+ * XXX: Reduce memory allocation granularity of the dirlist
+ * array.
+ */
+- new_dirlist = (uint64*)_TIFFCheckRealloc(tif, tif->tif_dirlist,
+- tif->tif_dirnumber, 2 * sizeof(uint64), "for IFD list");
++ if (tif->tif_dirnumber >= 32768)
++ tif->tif_dirlistsize = 65535;
++ else
++ tif->tif_dirlistsize = 2 * tif->tif_dirnumber;
++
++ new_dirlist = (uint64 *)_TIFFCheckRealloc(tif, tif->tif_dirlistoff,
++ tif->tif_dirlistsize, sizeof(uint64), "for IFD offset list");
+ if (!new_dirlist)
+ return 0;
+- if( tif->tif_dirnumber >= 32768 )
+- tif->tif_dirlistsize = 65535;
+- else
+- tif->tif_dirlistsize = 2 * tif->tif_dirnumber;
+- tif->tif_dirlist = new_dirlist;
++ tif->tif_dirlistoff = new_dirlist;
++ new_dirlist = (uint64 *)_TIFFCheckRealloc(tif, tif->tif_dirlistdirn,
++ tif->tif_dirlistsize, sizeof(uint16), "for IFD dirnumber list");
++ if (!new_dirlist)
++ return 0;
++ tif->tif_dirlistdirn = (uint16 *)new_dirlist;
+ }
+
+- tif->tif_dirlist[tif->tif_dirnumber - 1] = diroff;
++ tif->tif_dirlistoff[tif->tif_dirnumber - 1] = diroff;
++ tif->tif_dirlistdirn[tif->tif_dirnumber - 1] = dirn;
+
+ return 1;
+-}
++} /* --- _TIFFCheckDirNumberAndOffset() ---*/
+
+ /*
+ * Check the count field of a directory entry against a known value. The
+@@ -4703,6 +4747,47 @@ CheckDirCount(TIFF* tif, TIFFDirEntry* d
+ }
+
+ /*
++ * Retrieve the matching IFD directory number of a given IFD offset
++ * from the list of directories already seen.
++ * Returns 1 if the offset was in the list and the directory number
++ * can be returned.
++ * Returns 0 otherwise, or if an error occurred.
++ */
++int
++_TIFFGetDirNumberFromOffset(TIFF *tif, uint64 diroff, uint16* dirn)
++{
++ uint16 n;
++
++ if (diroff == 0) /* no more directories */
++ return 0;
++ if (tif->tif_dirnumber == 65535) {
++ TIFFErrorExt(tif->tif_clientdata, "_TIFFGetDirNumberFromOffset",
++ "Cannot handle more than 65535 TIFF directories");
++ return 0;
++ }
++
++ /* Check if offset is already in the list and return matching directory number.
++ * Otherwise update IFD list using TIFFNumberOfDirectories()
++ * and search again in IFD list.
++ */
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistoff && tif->tif_dirlistdirn; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ *dirn = tif->tif_dirlistdirn[n];
++ return 1;
++ }
++ }
++ TIFFNumberOfDirectories(tif);
++ for (n = 0; n < tif->tif_dirnumber && tif->tif_dirlistoff && tif->tif_dirlistdirn; n++) {
++ if (tif->tif_dirlistoff[n] == diroff) {
++ *dirn = tif->tif_dirlistdirn[n];
++ return 1;
++ }
++ }
++ return 0;
++} /*--- _TIFFGetDirNumberFromOffset() ---*/
++
++
++/*
+ * Read IFD structure from the specified offset. If the pointer to
+ * nextdiroff variable has been specified, read it too. Function returns a
+ * number of fields in the directory or 0 if failed.
+--- tiff-4.1.0+git191117.orig/libtiff/tif_open.c
++++ tiff-4.1.0+git191117/libtiff/tif_open.c
+@@ -353,7 +353,8 @@ TIFFClientOpen(
+ if (!TIFFDefaultDirectory(tif))
+ goto bad;
+ tif->tif_diroff = 0;
+- tif->tif_dirlist = NULL;
++ tif->tif_dirlistoff = NULL;
++ tif->tif_dirlistdirn = NULL;
+ tif->tif_dirlistsize = 0;
+ tif->tif_dirnumber = 0;
+ return (tif);
+--- tiff-4.1.0+git191117.orig/libtiff/tiffiop.h
++++ tiff-4.1.0+git191117/libtiff/tiffiop.h
+@@ -145,7 +145,8 @@ struct tiff {
+ #define TIFF_CHOPPEDUPARRAYS 0x4000000U /* set when allocChoppedUpStripArrays() has modified strip array */
+ uint64 tif_diroff; /* file offset of current directory */
+ uint64 tif_nextdiroff; /* file offset of following directory */
+- uint64* tif_dirlist; /* list of offsets to already seen directories to prevent IFD looping */
++ uint64* tif_dirlistoff; /* list of offsets to already seen directories to prevent IFD looping */
++ uint16* tif_dirlistdirn; /* list of directory numbers to already seen directories to prevent IFD looping */
+ uint16 tif_dirlistsize; /* number of entries in offset list */
+ uint16 tif_dirnumber; /* number of already seen directories */
+ TIFFDirectory tif_dir; /* internal rep of current directory */
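The "Rules for the check" in the patch header above amount to a small piece of bookkeeping over (IFD number, offset) pairs. The sketch below is a simplified standalone model of those rules, not the actual _TIFFCheckDirNumberAndOffset() implementation; the function name, variables and fixed-size arrays are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MAX_IFDS 65535

static uint64_t seen_off[MAX_IFDS];
static uint16_t seen_dirn[MAX_IFDS];
static uint16_t seen_count;

/* Returns 1 if (dirn, diroff) is consistent with the IFDs seen so far,
 * 0 if the offset was already recorded under a different IFD number,
 * i.e. an IFD loop. */
static int check_dir_number_and_offset(uint16_t dirn, uint64_t diroff)
{
    uint16_t n;

    for (n = 0; n < seen_count; n++)
        if (seen_off[n] == diroff)
            return seen_dirn[n] == dirn; /* same IFD number: ok, else loop */
    /* Offset not seen yet: update the entry for this IFD number if one
     * exists (offsets may move when a directory is rewritten), else append. */
    for (n = 0; n < seen_count; n++)
        if (seen_dirn[n] == dirn) {
            seen_off[n] = diroff;
            return 1;
        }
    if (seen_count == MAX_IFDS)
        return 0;
    seen_dirn[seen_count] = dirn;
    seen_off[seen_count] = diroff;
    seen_count++;
    return 1;
}

int main(void)
{
    printf("%d\n", check_dir_number_and_offset(0, 8));    /* 1: first IFD   */
    printf("%d\n", check_dir_number_and_offset(1, 4096)); /* 1: second IFD  */
    printf("%d\n", check_dir_number_and_offset(2, 8));    /* 0: loops to #0 */
    return 0;
}

The third call is rejected because offset 8 was first recorded for IFD 0 and is reached again as IFD 2, which is exactly the condition the patched TIFFAdvanceDirectory() and TIFFReadDirectory() refuse to follow.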
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2022-48281.patch b/meta/recipes-multimedia/libtiff/files/CVE-2022-48281.patch
new file mode 100644
index 0000000000..5747202bd9
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2022-48281.patch
@@ -0,0 +1,26 @@
+From 424c82b5b33256e7f03faace51dc8010f3ded9ff Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Sat, 21 Jan 2023 15:58:10 +0000
+Subject: [PATCH] tiffcrop: Correct simple copy paste error. Fix #488.
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz]
+CVE: CVE-2022-48281
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+
+---
+ tools/tiffcrop.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index a0789a3..8aed9cd 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -7564,7 +7564,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ crop_buff = (unsigned char *)_TIFFmalloc(cropsize + NUM_BUFF_OVERSIZE_BYTES);
+ else
+ {
+- prev_cropsize = seg_buffs[0].size;
++ prev_cropsize = seg_buffs[i].size;
+ if (prev_cropsize < cropsize)
+ {
+ next_buff = _TIFFrealloc(crop_buff, cropsize + NUM_BUFF_OVERSIZE_BYTES);
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-0795_0796_0797_0798_0799.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-0795_0796_0797_0798_0799.patch
new file mode 100644
index 0000000000..253018525a
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-0795_0796_0797_0798_0799.patch
@@ -0,0 +1,157 @@
+From 7808740e100ba30ffb791044f3b14dec3e85ed6f Mon Sep 17 00:00:00 2001
+From: Markus Koschany <apo@debian.org>
+Date: Tue, 21 Feb 2023 14:26:43 +0100
+Subject: [PATCH] CVE-2023-0795
+
+This is also the fix for CVE-2023-0796, CVE-2023-0797, CVE-2023-0798,
+CVE-2023-0799.
+
+Bug-Debian: https://bugs.debian.org/1031632
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/afaabc3e50d4e5d80a94143f7e3c997e7e410f68
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2023-0795 CVE-2023-0796 CVE-2023-0797 CVE-2023-0798 CVE-2023-0799
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ tools/tiffcrop.c | 51 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 30 insertions(+), 21 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 8aed9cd..f21a7d7 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -277,7 +277,6 @@ struct region {
+ uint32 width; /* width in pixels */
+ uint32 length; /* length in pixels */
+ uint32 buffsize; /* size of buffer needed to hold the cropped region */
+- unsigned char *buffptr; /* address of start of the region */
+ };
+
+ /* Cropping parameters from command line and image data
+@@ -532,7 +531,7 @@ static int rotateContigSamples24bits(uint16, uint16, uint16, uint32,
+ static int rotateContigSamples32bits(uint16, uint16, uint16, uint32,
+ uint32, uint32, uint8 *, uint8 *);
+ static int rotateImage(uint16, struct image_data *, uint32 *, uint32 *,
+- unsigned char **);
++ unsigned char **, int);
+ static int mirrorImage(uint16, uint16, uint16, uint32, uint32,
+ unsigned char *);
+ static int invertImage(uint16, uint16, uint16, uint32, uint32,
+@@ -5112,7 +5111,6 @@ initCropMasks (struct crop_mask *cps)
+ cps->regionlist[i].width = 0;
+ cps->regionlist[i].length = 0;
+ cps->regionlist[i].buffsize = 0;
+- cps->regionlist[i].buffptr = NULL;
+ cps->zonelist[i].position = 0;
+ cps->zonelist[i].total = 0;
+ }
+@@ -6358,8 +6356,13 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ image->adjustments & ROTATE_ANY);
+ return (-1);
+ }
+-
+- if (rotateImage(rotation, image, &image->width, &image->length, work_buff_ptr))
++
++ /* Dummy variable in order not to switch two times the
++ * image->width,->length within rotateImage(),
++ * but switch xres, yres there. */
++ uint32_t width = image->width;
++ uint32_t length = image->length;
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, TRUE))
+ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+@@ -6427,7 +6430,6 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ /* These should not be needed for composite images */
+ crop->regionlist[i].width = crop_width;
+ crop->regionlist[i].length = crop_length;
+- crop->regionlist[i].buffptr = crop_buff;
+
+ src_rowsize = ((img_width * bps * spp) + 7) / 8;
+ dst_rowsize = (((crop_width * bps * count) + 7) / 8);
+@@ -6664,7 +6666,6 @@ extractSeparateRegion(struct image_data *image, struct crop_mask *crop,
+
+ crop->regionlist[region].width = crop_width;
+ crop->regionlist[region].length = crop_length;
+- crop->regionlist[region].buffptr = crop_buff;
+
+ src = read_buff;
+ dst = crop_buff;
+@@ -7542,7 +7543,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff))
++ &crop->combined_length, &crop_buff, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %d degrees", crop->rotation);
+@@ -7648,7 +7649,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff))
++ &crop->regionlist[i].length, &crop_buff, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %d degrees", crop->rotation);
+@@ -7780,7 +7781,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr))
++ &crop->combined_length, crop_buff_ptr, TRUE))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %d degrees", crop->rotation);
+@@ -8443,7 +8444,7 @@ rotateContigSamples32bits(uint16 rotation, uint16 spp, uint16 bps, uint32 width,
+ /* Rotate an image by a multiple of 90 degrees clockwise */
+ static int
+ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+- uint32 *img_length, unsigned char **ibuff_ptr)
++ uint32 *img_length, unsigned char **ibuff_ptr, int rot_image_params)
+ {
+ int shift_width;
+ uint32 bytes_per_pixel, bytes_per_sample;
+@@ -8634,11 +8635,15 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+
+ *img_width = length;
+ *img_length = width;
+- image->width = length;
+- image->length = width;
+- res_temp = image->xres;
+- image->xres = image->yres;
+- image->yres = res_temp;
++ /* Only toggle image parameters if whole input image is rotated. */
++ if (rot_image_params)
++ {
++ image->width = length;
++ image->length = width;
++ res_temp = image->xres;
++ image->xres = image->yres;
++ image->yres = res_temp;
++ }
+ break;
+
+ case 270: if ((bps % 8) == 0) /* byte aligned data */
+@@ -8711,11 +8716,15 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+
+ *img_width = length;
+ *img_length = width;
+- image->width = length;
+- image->length = width;
+- res_temp = image->xres;
+- image->xres = image->yres;
+- image->yres = res_temp;
++ /* Only toggle image parameters if whole input image is rotated. */
++ if (rot_image_params)
++ {
++ image->width = length;
++ image->length = width;
++ res_temp = image->xres;
++ image->xres = image->yres;
++ image->yres = res_temp;
++ }
+ break;
+ default:
+ break;
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-0800_0801_0802_0803_0804.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-0800_0801_0802_0803_0804.patch
new file mode 100644
index 0000000000..bf1a439b4d
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-0800_0801_0802_0803_0804.patch
@@ -0,0 +1,135 @@
+From e18be834497e0ebf68d443abb9e18187f36cd3bf Mon Sep 17 00:00:00 2001
+From: Markus Koschany <apo@debian.org>
+Date: Tue, 21 Feb 2023 14:39:52 +0100
+Subject: [PATCH] CVE-2023-0800
+
+This is also the fix for CVE-2023-0801, CVE-2023-0802, CVE-2023-0803,
+CVE-2023-0804.
+
+Bug-Debian: https://bugs.debian.org/1031632
+Origin: https://gitlab.com/libtiff/libtiff/-/commit/33aee1275d9d1384791d2206776eb8152d397f00
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u7.debian.tar.xz ]
+CVE: CVE-2023-0800 CVE-2023-0801 CVE-2023-0802 CVE-2023-0803 CVE-2023-0804
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ tools/tiffcrop.c | 73 +++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 69 insertions(+), 4 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index f21a7d7..742615a 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5250,18 +5250,40 @@ computeInputPixelOffsets(struct crop_mask *crop, struct image_data *image,
+
+ crop->regionlist[i].buffsize = buffsize;
+ crop->bufftotal += buffsize;
++
++ /* For composite images with more than one region, the
++ * combined_length or combined_width always needs to be equal,
++ * respectively.
++ * Otherwise, even the first section/region copy
++ * action might cause buffer overrun. */
+ if (crop->img_mode == COMPOSITE_IMAGES)
+ {
+ switch (crop->edge_ref)
+ {
+ case EDGE_LEFT:
+ case EDGE_RIGHT:
++ if (i > 0 && zlength != crop->combined_length)
++ {
++ TIFFError(
++ "computeInputPixelOffsets",
++ "Only equal length regions can be combined for "
++ "-E left or right");
++ return (-1);
++ }
+ crop->combined_length = zlength;
+ crop->combined_width += zwidth;
+ break;
+ case EDGE_BOTTOM:
+ case EDGE_TOP: /* width from left, length from top */
+ default:
++ if (i > 0 && zwidth != crop->combined_width)
++ {
++ TIFFError("computeInputPixelOffsets",
++ "Only equal width regions can be "
++ "combined for -E "
++ "top or bottom");
++ return (-1);
++ }
+ crop->combined_width = zwidth;
+ crop->combined_length += zlength;
+ break;
+@@ -6416,6 +6438,47 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ crop->combined_width = 0;
+ crop->combined_length = 0;
+
++ /* If there is more than one region, check beforehand whether all the width
++ * and length values of the regions are the same, respectively. */
++ switch (crop->edge_ref)
++ {
++ default:
++ case EDGE_TOP:
++ case EDGE_BOTTOM:
++ for (i = 1; i < crop->selections; i++)
++ {
++ uint32_t crop_width0 =
++ crop->regionlist[i - 1].x2 - crop->regionlist[i - 1].x1 + 1;
++ uint32_t crop_width1 =
++ crop->regionlist[i].x2 - crop->regionlist[i].x1 + 1;
++ if (crop_width0 != crop_width1)
++ {
++ TIFFError("extractCompositeRegions",
++ "Only equal width regions can be combined for -E "
++ "top or bottom");
++ return (1);
++ }
++ }
++ break;
++ case EDGE_LEFT:
++ case EDGE_RIGHT:
++ for (i = 1; i < crop->selections; i++)
++ {
++ uint32_t crop_length0 =
++ crop->regionlist[i - 1].y2 - crop->regionlist[i - 1].y1 + 1;
++ uint32_t crop_length1 =
++ crop->regionlist[i].y2 - crop->regionlist[i].y1 + 1;
++ if (crop_length0 != crop_length1)
++ {
++ TIFFError("extractCompositeRegions",
++ "Only equal length regions can be combined for "
++ "-E left or right");
++ return (1);
++ }
++ }
++ }
++
++
+ for (i = 0; i < crop->selections; i++)
+ {
+ /* rows, columns, width, length are expressed in pixels */
+@@ -6439,8 +6502,9 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ default:
+ case EDGE_TOP:
+ case EDGE_BOTTOM:
+- if ((i > 0) && (crop_width != crop->regionlist[i - 1].width))
+- {
++ if ((crop->selections > i + 1) &&
++ (crop_width != crop->regionlist[i + 1].width))
++ {
+ TIFFError ("extractCompositeRegions",
+ "Only equal width regions can be combined for -E top or bottom");
+ return (1);
+@@ -6520,8 +6584,9 @@ extractCompositeRegions(struct image_data *image, struct crop_mask *crop,
+ break;
+ case EDGE_LEFT: /* splice the pieces of each row together, side by side */
+ case EDGE_RIGHT:
+- if ((i > 0) && (crop_length != crop->regionlist[i - 1].length))
+- {
++ if ((crop->selections > i + 1) &&
++ (crop_length != crop->regionlist[i + 1].length))
++ {
+ TIFFError ("extractCompositeRegions",
+ "Only equal length regions can be combined for -E left or right");
+ return (1);
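
The new up-front loops simply verify that every selection shares the same width (for -E top/bottom) or the same length (for -E left/right) before any region is copied, so the first copy can no longer overrun the composite buffer. A minimal standalone sketch of that check, with made-up names (struct region, check_equal_widths) and assuming inclusive pixel coordinates as tiffcrop uses:

    #include <stdint.h>
    #include <stdio.h>

    struct region { uint32_t x1, x2, y1, y2; };

    /* Returns 0 if every region has the same width, -1 otherwise. */
    static int check_equal_widths(const struct region *r, int n)
    {
        for (int i = 1; i < n; i++) {
            uint32_t w0 = r[i - 1].x2 - r[i - 1].x1 + 1;
            uint32_t w1 = r[i].x2 - r[i].x1 + 1;
            if (w0 != w1) {
                fprintf(stderr, "region %d width %u != %u\n",
                        i, (unsigned)w1, (unsigned)w0);
                return -1;
            }
        }
        return 0;
    }
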
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-1916.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-1916.patch
new file mode 100644
index 0000000000..9915b77645
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-1916.patch
@@ -0,0 +1,91 @@
+From 848434a81c443f59ec90d41218eba6e48a450a11 Mon Sep 17 00:00:00 2001
+From: zhailiangliang <zhailiangliang@loongson.cn>
+Date: Thu, 16 Mar 2023 16:16:54 +0800
+Subject: [PATCH] Fix heap-buffer-overflow in function extractImageSection
+
+CVE: CVE-2023-1916
+Upstream-Status: Submitted [https://gitlab.com/libtiff/libtiff/-/commit/848434a81c443f59ec90d41218eba6e48a450a11 https://gitlab.com/libtiff/libtiff/-/merge_requests/535]
+Signed-off-by: Marek Vasut <marex@denx.de>
+---
+ archive/tools/tiffcrop.c | 62 +++++++++++++++++++++++++++++-----------
+ 1 file changed, 45 insertions(+), 17 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/tools/tiffcrop.c
++++ tiff-4.1.0+git191117/tools/tiffcrop.c
+@@ -5549,6 +5549,15 @@ getCropOffsets(struct image_data *image,
+ crop->combined_width += (uint32)zwidth;
+ else
+ crop->combined_width = (uint32)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_BOTTOM: /* width from left, zones from bottom to top */
+ zwidth = offsets.crop_width;
+@@ -5579,6 +5588,15 @@ getCropOffsets(struct image_data *image,
+ else
+ crop->combined_length = (uint32)zlength;
+ crop->combined_width = (uint32)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_RIGHT: /* zones from right to left, length from top */
+ zlength = offsets.crop_length;
+@@ -5606,6 +5624,15 @@ getCropOffsets(struct image_data *image,
+ crop->combined_width += (uint32)zwidth;
+ else
+ crop->combined_width = (uint32)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ case EDGE_TOP: /* width from left, zones from top to bottom */
+ default:
+@@ -5632,6 +5659,15 @@ getCropOffsets(struct image_data *image,
+ else
+ crop->combined_length = (uint32)zlength;
+ crop->combined_width = (uint32)zwidth;
++
++ /* When the degrees clockwise rotation is 90 or 270, check the boundary */
++ if (((crop->rotation == 90) || (crop->rotation == 270))
++ && ((crop->combined_length > image->width) || (crop->combined_width > image->length)))
++ {
++ TIFFError("getCropOffsets", "The crop size exceeds the image boundary size");
++ return -1;
++ }
++
+ break;
+ } /* end switch statement */
+
+@@ -6827,9 +6863,9 @@ extractImageSection(struct image_data *i
+ * regardless of the way the data are organized in the input file.
+ * Furthermore, bytes and bits are arranged in buffer according to COMPRESSION=1 and FILLORDER=1
+ */
+- img_rowsize = (((img_width * spp * bps) + 7) / 8); /* row size in full bytes of source image */
+- full_bytes = (sect_width * spp * bps) / 8; /* number of COMPLETE bytes per row in section */
+- trailing_bits = (sect_width * spp * bps) % 8; /* trailing bits within the last byte of destination buffer */
++ img_rowsize = (((img_width * spp * bps) + 7) / 8); /* row size in full bytes of source image */
++ full_bytes = (sect_width * spp * bps) / 8; /* number of COMPLETE bytes per row in section */
++ trailing_bits = (sect_width * spp * bps) % 8; /* trailing bits within the last byte of destination buffer */
+
+ #ifdef DEVELMODE
+ TIFFError ("", "First row: %d, last row: %d, First col: %d, last col: %d\n",
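
Each EDGE_* branch gains the same guard: when the rotation is 90 or 270 degrees, the crop's combined length must fit within the image width and its combined width within the image length, because the axes swap after rotation. A compact sketch of that swapped comparison (crop_fits_rotated is an illustrative name, not a tiffcrop function):

    #include <stdint.h>

    /* After a 90/270-degree rotation the crop's length maps onto the image
     * width and vice versa, so both swapped comparisons must hold. */
    static int crop_fits_rotated(uint16_t rotation,
                                 uint32_t combined_width, uint32_t combined_length,
                                 uint32_t image_width, uint32_t image_length)
    {
        if (rotation == 90 || rotation == 270) {
            if (combined_length > image_width || combined_width > image_length)
                return 0;   /* crop exceeds the rotated image boundary */
        }
        return 1;
    }
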
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-25433.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-25433.patch
new file mode 100644
index 0000000000..7d6d40f25a
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-25433.patch
@@ -0,0 +1,173 @@
+From 9c22495e5eeeae9e00a1596720c969656bb8d678 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 3 Feb 2023 15:31:31 +0100
+Subject: [PATCH] tiffcrop correctly update buffersize after rotateImage()
+ fix#520 rotateImage() set up a new buffer and calculates its size
+ individually. Therefore, seg_buffs[] size needs to be updated accordingly.
+ Before this fix, the seg_buffs buffer size was calculated with a different
+ formula than within rotateImage().
+
+Closes #520.
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/9c22495e5eeeae9e00a1596720c969656bb8d678 && https://gitlab.com/libtiff/libtiff/-/commit/688012dca2c39033aa2dc7bcea9796787cfd1b44]
+CVE: CVE-2023-25433
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 69 +++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 56 insertions(+), 13 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 742615a..aab0ec6 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -531,7 +531,7 @@ static int rotateContigSamples24bits(uint16, uint16, uint16, uint32,
+ static int rotateContigSamples32bits(uint16, uint16, uint16, uint32,
+ uint32, uint32, uint8 *, uint8 *);
+ static int rotateImage(uint16, struct image_data *, uint32 *, uint32 *,
+- unsigned char **, int);
++ unsigned char **, size_t *);
+ static int mirrorImage(uint16, uint16, uint16, uint32, uint32,
+ unsigned char *);
+ static int invertImage(uint16, uint16, uint16, uint32, uint32,
+@@ -6384,7 +6384,7 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ * but switch xres, yres there. */
+ uint32_t width = image->width;
+ uint32_t length = image->length;
+- if (rotateImage(rotation, image, &width, &length, work_buff_ptr, TRUE))
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL))
+ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+@@ -7607,8 +7607,12 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
++ /* rotateImage() set up a new buffer and calculates its size
++ * individually. Therefore, seg_buffs size needs to be updated
++ * accordingly. */
++ size_t rot_buf_size = 0;
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff, FALSE))
++ &crop->combined_length, &crop_buff, &rot_buf_size))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %d degrees", crop->rotation);
+@@ -7713,8 +7717,13 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+- if (rotateImage(crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff, FALSE))
++ /* Furthermore, rotateImage() set up a new buffer and calculates
++ * its size individually. Therefore, seg_buffs size needs to be
++ * updated accordingly. */
++ size_t rot_buf_size = 0;
++ if (rotateImage(
++ crop->rotation, image, &crop->regionlist[i].width,
++ &crop->regionlist[i].length, &crop_buff, &rot_buf_size))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %d degrees", crop->rotation);
+@@ -7725,8 +7734,7 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ crop->combined_width = total_width;
+ crop->combined_length = total_length;
+ seg_buffs[i].buffer = crop_buff;
+- seg_buffs[i].size = (((crop->regionlist[i].width * image->bps + 7 ) / 8)
+- * image->spp) * crop->regionlist[i].length;
++ seg_buffs[i].size = rot_buf_size;
+ }
+ }
+ }
+@@ -7735,7 +7743,6 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+
+ /* Copy the crop section of the data from the current image into a buffer
+ * and adjust the IFD values to reflect the new size. If no cropping is
+- * required, use the origial read buffer as the crop buffer.
+ *
+ * There is quite a bit of redundancy between this routine and the more
+ * specialized processCropSelections, but this provides
+@@ -7846,7 +7853,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr, TRUE))
++ &crop->combined_length, crop_buff_ptr, NULL))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %d degrees", crop->rotation);
+@@ -8515,7 +8522,8 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+ uint32 bytes_per_pixel, bytes_per_sample;
+ uint32 row, rowsize, src_offset, dst_offset;
+ uint32 i, col, width, length;
+- uint32 colsize, buffsize, col_offset, pix_offset;
++ uint32 colsize, col_offset, pix_offset;
++ tmsize_t buffsize;
+ unsigned char *ibuff;
+ unsigned char *src;
+ unsigned char *dst;
+@@ -8528,12 +8536,41 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+ spp = image->spp;
+ bps = image->bps;
+
++ if ((spp != 0 && bps != 0 &&
++ width > (uint32_t)((UINT32_MAX - 7) / spp / bps)) ||
++ (spp != 0 && bps != 0 &&
++ length > (uint32_t)((UINT32_MAX - 7) / spp / bps)))
++ {
++ TIFFError("rotateImage", "Integer overflow detected.");
++ return (-1);
++ }
++
+ rowsize = ((bps * spp * width) + 7) / 8;
+ colsize = ((bps * spp * length) + 7) / 8;
+ if ((colsize * width) > (rowsize * length))
+- buffsize = (colsize + 1) * width;
++{
++ if (((tmsize_t)colsize + 1) != 0 &&
++ (tmsize_t)width > ((TIFF_TMSIZE_T_MAX - NUM_BUFF_OVERSIZE_BYTES) /
++ ((tmsize_t)colsize + 1)))
++ {
++ TIFFError("rotateImage",
++ "Integer overflow when calculating buffer size.");
++ return (-1);
++ }
++ buffsize = ((tmsize_t)colsize + 1) * width;
++ }
+ else
+- buffsize = (rowsize + 1) * length;
++ {
++ if (((tmsize_t)rowsize + 1) != 0 &&
++ (tmsize_t)length > ((TIFF_TMSIZE_T_MAX - NUM_BUFF_OVERSIZE_BYTES) /
++ ((tmsize_t)rowsize + 1)))
++ {
++ TIFFError("rotateImage",
++ "Integer overflow when calculating buffer size.");
++ return (-1);
++ }
++ buffsize = (rowsize + 1) * length;
++ }
+
+ bytes_per_sample = (bps + 7) / 8;
+ bytes_per_pixel = ((bps * spp) + 7) / 8;
+@@ -8556,11 +8593,17 @@ rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+ /* Add 3 padding bytes for extractContigSamplesShifted32bits */
+ if (!(rbuff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES)))
+ {
+- TIFFError("rotateImage", "Unable to allocate rotation buffer of %1u bytes", buffsize + NUM_BUFF_OVERSIZE_BYTES);
++ TIFFError("rotateImage",
++ "Unable to allocate rotation buffer of %" TIFF_SSIZE_FORMAT
++ " bytes ",
++ buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ return (-1);
+ }
+ _TIFFmemset(rbuff, '\0', buffsize + NUM_BUFF_OVERSIZE_BYTES);
+
++ if (rot_buf_size != NULL)
++ *rot_buf_size = buffsize;
++
+ ibuff = *ibuff_ptr;
+ switch (rotation)
+ {
+--
+2.25.1
+
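
Two ideas in this patch are worth restating: the rotation buffer size is computed with an explicit overflow check before the multiplication, and the size actually allocated is reported back through rot_buf_size so the seg_buffs[] bookkeeping matches the real buffer. A simplified sketch under those assumptions (GUARD_BYTES stands in for NUM_BUFF_OVERSIZE_BYTES; alloc_rotation_buffer is a made-up helper, not libtiff API):

    #include <stdint.h>
    #include <stdlib.h>

    #define GUARD_BYTES 3   /* stand-in for NUM_BUFF_OVERSIZE_BYTES */

    /* Refuse the multiplication if (colsize + 1) * width cannot fit in
     * size_t, then report the size that was actually allocated back to the
     * caller so its bookkeeping stays in sync with the buffer. */
    static unsigned char *alloc_rotation_buffer(size_t colsize, size_t width,
                                                size_t *out_size)
    {
        if (colsize >= SIZE_MAX ||
            width > (SIZE_MAX - GUARD_BYTES) / (colsize + 1))
            return NULL;                     /* would overflow */

        size_t buffsize = (colsize + 1) * width;
        unsigned char *buf = calloc(1, buffsize + GUARD_BYTES);
        if (buf != NULL && out_size != NULL)
            *out_size = buffsize;            /* the size the caller must track */
        return buf;
    }
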
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-25434-CVE-2023-25435.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-25434-CVE-2023-25435.patch
new file mode 100644
index 0000000000..6a6596f092
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-25434-CVE-2023-25435.patch
@@ -0,0 +1,94 @@
+From 69818e2f2d246e6631ac2a2da692c3706b849c38 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sun, 29 Jan 2023 11:09:26 +0100
+Subject: [PATCH] tiffcrop: Amend rotateImage() not to toggle the input (main)
+ image width and length parameters when only cropped image sections are
+ rotated. Remove buffptr from region structure because never used.
+
+Closes #492 #493 #494 #495 #499 #518 #519
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/69818e2f2d246e6631ac2a2da692c3706b849c38]
+CVE: CVE-2023-25434 & CVE-2023-25435
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index aab0ec6..ce84414 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -531,7 +531,7 @@ static int rotateContigSamples24bits(uint16, uint16, uint16, uint32,
+ static int rotateContigSamples32bits(uint16, uint16, uint16, uint32,
+ uint32, uint32, uint8 *, uint8 *);
+ static int rotateImage(uint16, struct image_data *, uint32 *, uint32 *,
+- unsigned char **, size_t *);
++ unsigned char **, size_t *, int);
+ static int mirrorImage(uint16, uint16, uint16, uint32, uint32,
+ unsigned char *);
+ static int invertImage(uint16, uint16, uint16, uint32, uint32,
+@@ -6382,10 +6382,11 @@ static int correct_orientation(struct image_data *image, unsigned char **work_b
+ /* Dummy variable in order not to switch two times the
+ * image->width,->length within rotateImage(),
+ * but switch xres, yres there. */
+- uint32_t width = image->width;
+- uint32_t length = image->length;
+- if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL))
+- {
++ uint32_t width = image->width;
++ uint32_t length = image->length;
++ if (rotateImage(rotation, image, &width, &length, work_buff_ptr, NULL,
++ TRUE))
++ {
+ TIFFError ("correct_orientation", "Unable to rotate image");
+ return (-1);
+ }
+@@ -7612,7 +7613,8 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ * accordingly. */
+ size_t rot_buf_size = 0;
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, &crop_buff, &rot_buf_size))
++ &crop->combined_length, &crop_buff, &rot_buf_size,
++ FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate composite regions by %d degrees", crop->rotation);
+@@ -7721,9 +7723,10 @@ processCropSelections(struct image_data *image, struct crop_mask *crop,
+ * its size individually. Therefore, seg_buffs size needs to be
+ * updated accordingly. */
+ size_t rot_buf_size = 0;
+- if (rotateImage(
+- crop->rotation, image, &crop->regionlist[i].width,
+- &crop->regionlist[i].length, &crop_buff, &rot_buf_size))
++ if (rotateImage(crop->rotation, image,
++ &crop->regionlist[i].width,
++ &crop->regionlist[i].length, &crop_buff,
++ &rot_buf_size, FALSE))
+ {
+ TIFFError("processCropSelections",
+ "Failed to rotate crop region by %d degrees", crop->rotation);
+@@ -7853,7 +7856,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ if (crop->crop_mode & CROP_ROTATE) /* rotate should be last as it can reallocate the buffer */
+ {
+ if (rotateImage(crop->rotation, image, &crop->combined_width,
+- &crop->combined_length, crop_buff_ptr, NULL))
++ &crop->combined_length, crop_buff_ptr, NULL, TRUE))
+ {
+ TIFFError("createCroppedImage",
+ "Failed to rotate image or cropped selection by %d degrees", crop->rotation);
+@@ -8515,8 +8518,10 @@ rotateContigSamples32bits(uint16 rotation, uint16 spp, uint16 bps, uint32 width,
+
+ /* Rotate an image by a multiple of 90 degrees clockwise */
+ static int
+-rotateImage(uint16 rotation, struct image_data *image, uint32 *img_width,
+- uint32 *img_length, unsigned char **ibuff_ptr, int rot_image_params)
++rotateImage(uint16 rotation, struct image_data *image,
++ uint32 *img_width, uint32 *img_length,
++ unsigned char **ibuff_ptr, size_t *rot_buf_size,
++ int rot_image_params)
+ {
+ int shift_width;
+ uint32 bytes_per_pixel, bytes_per_sample;
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-26965.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-26965.patch
new file mode 100644
index 0000000000..b7a7e93764
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-26965.patch
@@ -0,0 +1,90 @@
+From ec8ef90c1f573c9eb1f17d6a056aa0015f184acf Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Tue, 14 Feb 2023 20:43:43 +0100
+Subject: [PATCH] tiffcrop: Do not reuse input buffer for subsequent images.
+ Fix issue 527
+
+Reuse of read_buff within loadImage() from previous image is quite unsafe, because other functions (like rotateImage() etc.) reallocate that buffer with different size without updating the local prev_readsize value.
+
+Closes #527
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u8.debian.tar.xz]
+CVE: CVE-2023-26965
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 40 ++++++++++------------------------------
+ 1 file changed, 10 insertions(+), 30 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index ce84414..a533089 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -5935,9 +5935,7 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ uint32 tw = 0, tl = 0; /* Tile width and length */
+ tmsize_t tile_rowsize = 0;
+ unsigned char *read_buff = NULL;
+- unsigned char *new_buff = NULL;
+ int readunit = 0;
+- static tmsize_t prev_readsize = 0;
+
+ TIFFGetFieldDefaulted(in, TIFFTAG_BITSPERSAMPLE, &bps);
+ TIFFGetFieldDefaulted(in, TIFFTAG_SAMPLESPERPIXEL, &spp);
+@@ -6232,37 +6230,20 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ read_buff = *read_ptr;
+ /* +3 : add a few guard bytes since reverseSamples16bits() can read a bit */
+ /* outside buffer */
+- if (!read_buff)
++ if (read_buff)
+ {
+- if( buffsize > 0xFFFFFFFFU - 3 )
+- {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+- return (-1);
+- }
+- read_buff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
++ _TIFFfree(read_buff);
+ }
+- else
+- {
+- if (prev_readsize < buffsize)
+- {
+- if( buffsize > 0xFFFFFFFFU - 3 )
+- {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
+- return (-1);
+- }
+- new_buff = _TIFFrealloc(read_buff, buffsize + NUM_BUFF_OVERSIZE_BYTES);
+- if (!new_buff)
+- {
+- free (read_buff);
+- read_buff = (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+- }
+- else
+- read_buff = new_buff;
+- }
+- }
++ if (buffsize > 0xFFFFFFFFU - 3)
++ {
++ TIFFError("loadImage", "Required read buffer size too large");
++ return (-1);
++ }
++ read_buff =
++ (unsigned char *)_TIFFmalloc(buffsize + NUM_BUFF_OVERSIZE_BYTES);
+ if (!read_buff)
+ {
+- TIFFError("loadImage", "Unable to allocate/reallocate read buffer");
++ TIFFError("loadImage", "Unable to allocate read buffer");
+ return (-1);
+ }
+
+@@ -6270,7 +6251,6 @@ loadImage(TIFF* in, struct image_data *image, struct dump_opts *dump, unsigned c
+ read_buff[buffsize+1] = 0;
+ read_buff[buffsize+2] = 0;
+
+- prev_readsize = buffsize;
+ *read_ptr = read_buff;
+
+ /* N.B. The read functions used copy separate plane data into a buffer as interleaved
+--
+2.25.1
+
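
The rewrite drops the static prev_readsize cache entirely: any buffer left over from the previous image is freed and a fresh one is allocated, so a reallocation performed elsewhere (for example by rotateImage()) can no longer leave the cached size stale. The pattern, reduced to a sketch with an illustrative helper name:

    #include <stdlib.h>

    /* Free whatever the previous image used and allocate a fresh buffer of
     * exactly the size this image needs plus guard bytes; buffsize is
     * assumed to have been range-checked by the caller, as in loadImage(). */
    static unsigned char *fresh_read_buffer(unsigned char **buf_ptr,
                                            size_t buffsize, size_t guard)
    {
        free(*buf_ptr);             /* free(NULL) is a no-op */
        *buf_ptr = malloc(buffsize + guard);
        return *buf_ptr;
    }
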
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-26966.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-26966.patch
new file mode 100644
index 0000000000..48657e6aa4
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-26966.patch
@@ -0,0 +1,35 @@
+From b0e1c25dd1d065200c8d8f59ad0afe014861a1b9 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Thu, 16 Feb 2023 12:03:16 +0100
+Subject: [PATCH] tif_luv: Check and correct for NaN data in uv_encode().
+
+Closes #530
+
+Upstream-Status: Backport [import from debian http://security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u8.debian.tar.xz]
+CVE: CVE-2023-26966
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_luv.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/libtiff/tif_luv.c b/libtiff/tif_luv.c
+index 6fe4858..8b2c5f1 100644
+--- a/libtiff/tif_luv.c
++++ b/libtiff/tif_luv.c
+@@ -923,6 +923,13 @@ uv_encode(double u, double v, int em) /* encode (u',v') coordinates */
+ {
+ register int vi, ui;
+
++ /* check for NaN */
++ if (u != u || v != v)
++ {
++ u = U_NEU;
++ v = V_NEU;
++ }
++
+ if (v < UV_VSTART)
+ return oog_encode(u, v);
+ vi = itrunc((v - UV_VSTART)*(1./UV_SQSIZ), em);
+--
+2.25.1
+
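
The added guard uses the classic self-comparison idiom: NaN is the only floating-point value for which u != u is true, so the test behaves like isnan(u) without depending on it. A standalone sketch (sanitize_uv and the *_neutral parameters are illustrative; in the patch the fallbacks are the U_NEU and V_NEU constants):

    /* Replace NaN chromaticity values with neutral fallbacks before they
     * reach the encoder; u != u is true only when u is NaN. */
    static void sanitize_uv(double *u, double *v,
                            double u_neutral, double v_neutral)
    {
        if (*u != *u || *v != *v) {
            *u = u_neutral;
            *v = v_neutral;
        }
    }
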
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-2908.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-2908.patch
new file mode 100644
index 0000000000..62a5e1831c
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-2908.patch
@@ -0,0 +1,33 @@
+From 8c0859a80444c90b8dfb862a9f16de74e16f0a9e Mon Sep 17 00:00:00 2001
+From: xiaoxiaoafeifei <lliangliang2007@163.com>
+Date: Fri, 21 Apr 2023 13:01:34 +0000
+Subject: [PATCH] countInkNamesString(): fix `UndefinedBehaviorSanitizer`:
+ applying zero offset to null pointer
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/9bd48f0dbd64fb94dc2b5b05238fde0bfdd4ff3f]
+CVE: CVE-2023-2908
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_dir.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/libtiff/tif_dir.c b/libtiff/tif_dir.c
+index 9d8267a..6389b40 100644
+--- a/libtiff/tif_dir.c
++++ b/libtiff/tif_dir.c
+@@ -145,10 +145,10 @@ static uint16
+ countInkNamesString(TIFF *tif, uint32 slen, const char *s)
+ {
+ uint16 i = 0;
+- const char *ep = s + slen;
+- const char *cp = s;
+
+ if (slen > 0) {
++ const char *ep = s + slen;
++ const char *cp = s;
+ do {
+ for (; cp < ep && *cp != '\0'; cp++) {}
+ if (cp >= ep)
+--
+2.25.1
+
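
The fix is purely about where the pointer arithmetic happens: s + slen is only computed once slen > 0 has been established, so the sanitizer's "applying zero offset to null pointer" report can no longer trigger for an empty ink-name string. A simplified counting sketch under that assumption (count_names is an illustrative name and deliberately simpler than countInkNamesString):

    #include <stdint.h>

    /* Count NUL-terminated names in a length-delimited string; the end
     * pointer is only formed once the string is known to be non-empty. */
    static uint16_t count_names(const char *s, uint32_t slen)
    {
        uint16_t n = 0;
        if (slen > 0) {
            const char *ep = s + slen;
            const char *cp = s;
            while (cp < ep) {
                while (cp < ep && *cp != '\0')
                    cp++;
                if (cp >= ep)
                    break;      /* trailing, unterminated name: not counted */
                cp++;           /* skip the terminator */
                n++;
            }
        }
        return n;
    }
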
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-3316.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-3316.patch
new file mode 100644
index 0000000000..8db24fc714
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-3316.patch
@@ -0,0 +1,59 @@
+From d63de61b1ec3385f6383ef9a1f453e4b8b11d536 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 3 Feb 2023 17:38:55 +0100
+Subject: [PATCH] TIFFClose() avoid NULL pointer dereferencing. fix#515
+
+Closes #515
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/d63de61b1ec3385f6383ef9a1f453e4b8b11d536]
+CVE: CVE-2023-3316
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ libtiff/tif_close.c | 11 +++++++----
+ tools/tiffcrop.c | 5 ++++-
+ 2 files changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/libtiff/tif_close.c b/libtiff/tif_close.c
+index e4228df..335e80f 100644
+--- a/libtiff/tif_close.c
++++ b/libtiff/tif_close.c
+@@ -118,13 +118,16 @@ TIFFCleanup(TIFF* tif)
+ */
+
+ void
+-TIFFClose(TIFF* tif)
++TIFFClose(TIFF *tif)
+ {
+- TIFFCloseProc closeproc = tif->tif_closeproc;
+- thandle_t fd = tif->tif_clientdata;
++ if (tif != NULL)
++ {
++ TIFFCloseProc closeproc = tif->tif_closeproc;
++ thandle_t fd = tif->tif_clientdata;
+
+ TIFFCleanup(tif);
+- (void) (*closeproc)(fd);
++ (void)(*closeproc)(fd);
++ }
+ }
+
+ /* vim: set ts=8 sts=8 sw=8 noet: */
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index a533089..f14bb0c 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -2526,7 +2526,10 @@ main(int argc, char* argv[])
+ }
+ }
+
+- TIFFClose(out);
++ if (out != NULL)
++ {
++ TIFFClose(out);
++ }
+
+ return (0);
+ } /* end main */
+--
+2.25.1
+
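
The shape of the fix is the familiar free(NULL) convention: a teardown function tolerates a NULL handle so error paths can call it unconditionally (tiffcrop's main() keeps its own guard as well). A generic sketch, not libtiff code (struct handle and handle_close are made-up names):

    #include <stdio.h>

    struct handle { FILE *fp; };

    /* Close a handle, accepting NULL so callers need no extra guard. */
    static void handle_close(struct handle *h)
    {
        if (h != NULL) {
            if (h->fp != NULL)
                fclose(h->fp);
            /* ... release any other resources owned by h ... */
        }
    }
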
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-3576.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-3576.patch
new file mode 100644
index 0000000000..67837fe142
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-3576.patch
@@ -0,0 +1,35 @@
+From 881a070194783561fd209b7c789a4e75566f7f37 Mon Sep 17 00:00:00 2001
+From: zhailiangliang <zhailiangliang@loongson.cn>
+Date: Tue, 7 Mar 2023 15:02:08 +0800
+Subject: [PATCH] Fix memory leak in tiffcrop.c
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/881a070194783561fd209b7c789a4e75566f7f37]
+CVE: CVE-2023-3576
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tools/tiffcrop.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index f14bb0c..7121c7c 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -7746,8 +7746,13 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+
+ read_buff = *read_buff_ptr;
+
++ /* Memory is freed before crop_buff_ptr is overwritten */
++ if (*crop_buff_ptr != NULL)
++ {
++ _TIFFfree(*crop_buff_ptr);
++ }
++
+ /* process full image, no crop buffer needed */
+- crop_buff = read_buff;
+ *crop_buff_ptr = read_buff;
+ crop->combined_width = image->width;
+ crop->combined_length = image->length;
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-3618.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-3618.patch
new file mode 100644
index 0000000000..fd67305c0b
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-3618.patch
@@ -0,0 +1,47 @@
+From b5c7d4c4e03333ac16b5cfb11acaaeaa493334f8 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Fri, 5 May 2023 19:43:46 +0200
+Subject: [PATCH] Consider error return of writeSelections(). Fixes #553
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/b5c7d4c4e03333ac16b5cfb11acaaeaa493334f8]
+CVE: CVE-2023-3618
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcrop.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c
+index 7121c7c..93b7f96 100644
+--- a/tools/tiffcrop.c
++++ b/tools/tiffcrop.c
+@@ -2437,9 +2437,15 @@ main(int argc, char* argv[])
+ { /* Whole image or sections not based on output page size */
+ if (crop.selections > 0)
+ {
+- writeSelections(in, &out, &crop, &image, &dump, seg_buffs,
+- mp, argv[argc - 1], &next_page, total_pages);
+- }
++ if (writeSelections(in, &out, &crop, &image, &dump,
++ seg_buffs, mp, argv[argc - 1],
++ &next_page, total_pages))
++ {
++ TIFFError("main",
++ "Unable to write new image selections");
++ exit(EXIT_FAILURE);
++ }
++ }
+ else /* One file all images and sections */
+ {
+ if (update_output_file (&out, mp, crop.exp_mode, argv[argc - 1],
+@@ -7749,7 +7755,7 @@ createCroppedImage(struct image_data *image, struct crop_mask *crop,
+ /* Memory is freed before crop_buff_ptr is overwritten */
+ if (*crop_buff_ptr != NULL)
+ {
+- _TIFFfree(*crop_buff_ptr);
++ _TIFFfree(*crop_buff_ptr);
+ }
+
+ /* process full image, no crop buffer needed */
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-40745.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-40745.patch
new file mode 100644
index 0000000000..6eb286039f
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-40745.patch
@@ -0,0 +1,34 @@
+From 4fc16f649fa2875d5c388cf2edc295510a247ee5 Mon Sep 17 00:00:00 2001
+From: Arie Haenel <arie.haenel@jct.ac.il>
+Date: Wed, 19 Jul 2023 19:34:25 +0000
+Subject: [PATCH] tiffcp: fix memory corruption (overflow) on hostile images
+ (fixes #591)
+
+Upstream-Status: Backport from [https://gitlab.com/libtiff/libtiff/-/commit/4fc16f649fa2875d5c388cf2edc295510a247ee5]
+CVE: CVE-2023-40745
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ tools/tiffcp.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 83b3910..007bd05 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -1437,6 +1437,13 @@ DECLAREreadFunc(readSeparateTilesIntoBuffer)
+ TIFFError(TIFFFileName(in), "Error, cannot handle that much samples per tile row (Tile Width * Samples/Pixel)");
+ return 0;
+ }
++
++ if ( (imagew - tilew * spp) > INT_MAX ){
++ TIFFError(TIFFFileName(in),
++ "Error, image raster scan line size is too large");
++ return 0;
++ }
++
+ iskew = imagew - tilew*spp;
+ tilebuf = _TIFFmalloc(tilesize);
+ if (tilebuf == 0)
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-41175.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-41175.patch
new file mode 100644
index 0000000000..3f44a42012
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-41175.patch
@@ -0,0 +1,67 @@
+From 4cc97e3dfa6559f4d17af0d0687bcae07ca4b73d Mon Sep 17 00:00:00 2001
+From: Arie Haenel <arie.haenel@jct.ac.il>
+Date: Wed, 19 Jul 2023 19:40:01 +0000
+Subject: raw2tiff: fix integer overflow and bypass of the check (fixes #592)
+
+Upstream-Status: Backport [import from debian security.debian.org/debian-security/pool/updates/main/t/tiff/tiff_4.1.0+git191117-2~deb10u8.debian.tar.xz
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/6e2dac5f904496d127c92ddc4e56eccfca25c2ee]
+CVE: CVE-2023-41175
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tools/raw2tiff.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+diff --git a/tools/raw2tiff.c b/tools/raw2tiff.c
+index ab36ff4e..a905da52 100644
+--- a/tools/raw2tiff.c
++++ b/tools/raw2tiff.c
+@@ -35,6 +35,7 @@
+ #include <sys/types.h>
+ #include <math.h>
+ #include <ctype.h>
++#include <limits.h>
+
+ #ifdef HAVE_UNISTD_H
+ # include <unistd.h>
+@@ -101,6 +102,7 @@ main(int argc, char* argv[])
+ int fd;
+ char *outfilename = NULL;
+ TIFF *out;
++ uint32 temp_limit_check = 0;
+
+ uint32 row, col, band;
+ int c;
+@@ -212,6 +214,30 @@ main(int argc, char* argv[])
+ if (guessSize(fd, dtype, hdr_size, nbands, swab, &width, &length) < 0)
+ return 1;
+
++ if ((width == 0) || (length == 0) ){
++ fprintf(stderr, "Too large nbands value specified.\n");
++ return (EXIT_FAILURE);
++ }
++
++ temp_limit_check = nbands * depth;
++
++ if ( !temp_limit_check || length > ( UINT_MAX / temp_limit_check ) ) {
++ fprintf(stderr, "Too large length size specified.\n");
++ return (EXIT_FAILURE);
++ }
++ temp_limit_check = temp_limit_check * length;
++
++ if ( !temp_limit_check || width > ( UINT_MAX / temp_limit_check ) ) {
++ fprintf(stderr, "Too large width size specified.\n");
++ return (EXIT_FAILURE);
++ }
++ temp_limit_check = temp_limit_check * width;
++
++ if ( !temp_limit_check || hdr_size > ( UINT_MAX - temp_limit_check ) ) {
++ fprintf(stderr, "Too large header size specified.\n");
++ return (EXIT_FAILURE);
++ }
++
+ if (outfilename == NULL)
+ outfilename = argv[optind+1];
+ out = TIFFOpen(outfilename, "w");
+--
+2.30.2
+
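
The added block validates nbands * depth * length * width + hdr_size one factor at a time, checking the running product against UINT_MAX before each multiplication so the final size can neither overflow nor slip past later checks. The same logic as a self-contained predicate (total_size_fits is an illustrative name, not raw2tiff code):

    #include <limits.h>

    /* Returns 1 if nbands*depth*length*width + hdr_size fits in unsigned int,
     * folding one factor at a time and checking headroom before each step. */
    static int total_size_fits(unsigned nbands, unsigned depth,
                               unsigned length, unsigned width,
                               unsigned hdr_size)
    {
        if (nbands == 0 || depth == 0 || nbands > UINT_MAX / depth)
            return 0;
        unsigned acc = nbands * depth;
        if (length == 0 || length > UINT_MAX / acc)
            return 0;
        acc *= length;
        if (width == 0 || width > UINT_MAX / acc)
            return 0;
        acc *= width;
        return hdr_size <= UINT_MAX - acc;
    }
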
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-52356.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-52356.patch
new file mode 100644
index 0000000000..1b651e6529
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-52356.patch
@@ -0,0 +1,53 @@
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFErrorExt instead of TIFFErrorExtR (the latter did not exist yet);
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 51558511bdbbcffdce534db21dbaf5d54b31638a Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 15:58:41 +0100
+Subject: [PATCH] TIFFReadRGBAStrip/TIFFReadRGBATile: add more validation of
+ col/row (fixes #622)
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-52356.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/51558511bdbbcffdce534db21dbaf5d54b31638a]
+CVE: CVE-2023-52356
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_getimage.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_getimage.c
++++ tiff-4.1.0+git191117/libtiff/tif_getimage.c
+@@ -2926,6 +2926,13 @@ TIFFReadRGBAStripExt(TIFF* tif, uint32 r
+ }
+
+ if (TIFFRGBAImageOK(tif, emsg) && TIFFRGBAImageBegin(&img, tif, stop_on_error, emsg)) {
++ if (row >= img.height)
++ {
++ TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif),
++ "Invalid row passed to TIFFReadRGBAStrip().");
++ TIFFRGBAImageEnd(&img);
++ return (0);
++ }
+
+ img.row_offset = row;
+ img.col_offset = 0;
+@@ -3002,6 +3009,14 @@ TIFFReadRGBATileExt(TIFF* tif, uint32 co
+ return( 0 );
+ }
+
++ if (col >= img.width || row >= img.height)
++ {
++ TIFFErrorExt(tif->tif_clientdata, TIFFFileName(tif),
++ "Invalid row/col passed to TIFFReadRGBATile().");
++ TIFFRGBAImageEnd(&img);
++ return (0);
++ }
++
+ /*
+ * The TIFFRGBAImageGet() function doesn't allow us to get off the
+ * edge of the image, even to fill an otherwise valid tile. So we
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6228.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6228.patch
new file mode 100644
index 0000000000..a777dea9b0
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6228.patch
@@ -0,0 +1,30 @@
+From 1e7d217a323eac701b134afc4ae39b6bdfdbc96a Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sat, 9 Sep 2023 15:45:47 +0200
+Subject: [PATCH] Check also if codec of input image is available,
+ independently from codec check of output image and return with error if not.
+ Fixes #606.
+
+Upstream-Status: Backport [https://gitlab.com/libtiff/libtiff/-/commit/1e7d217a323eac701b134afc4ae39b6bdfdbc96a]
+CVE: CVE-2023-6228
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ tools/tiffcp.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index 007bd05..d2f7b66 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -628,6 +628,8 @@ tiffcp(TIFF* in, TIFF* out)
+ else
+ CopyField(TIFFTAG_COMPRESSION, compression);
+ TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression);
++ if (!TIFFIsCODECConfigured(input_compression))
++ return FALSE;
+ TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric);
+ if (input_compression == COMPRESSION_JPEG) {
+ /* Force conversion to RGB */
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-1.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-1.patch
new file mode 100644
index 0000000000..e955b3f2e4
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-1.patch
@@ -0,0 +1,191 @@
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . included inttypes.h header to support PRIu32 and PRIu64;
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+ . using uint64 instead of uint64_t to preserve the current code usage;
+ . calling _TIFFfree(data) instead of _TIFFfreeExt(tif, data) (the latter did not exist yet);
+ . calls to the check size, that is the idea of the patch, were added before
+ _TIFFCheckMalloc and may note match the original patch methods;
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 5320c9d89c054fa805d037d84c57da874470b01a Mon Sep 17 00:00:00 2001
+From: Su Laus <sulau@freenet.de>
+Date: Tue, 31 Oct 2023 15:43:29 +0000
+Subject: [PATCH] Prevent some out-of-memory attacks
+
+Some small fuzzer files fake large amounts of data and provoke out-of-memory situations. For non-compressed data content / tags, out-of-memory can be prevented by comparing with the file size.
+
+At image reading, data size of some tags / data structures (StripByteCounts, StripOffsets, StripArray, TIFF directory) is compared with file size to prevent provoked out-of-memory attacks.
+
+See issue https://gitlab.com/libtiff/libtiff/-/issues/614#note_1602683857
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-6277-1.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/5320c9d89c054fa805d037d84c57da874470b01a]
+CVE: CVE-2023-6277
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_dirread.c | 92 ++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 90 insertions(+), 2 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -37,6 +37,7 @@
+ #include "tiffiop.h"
+ #include <float.h>
+ #include <stdlib.h>
++#include <inttypes.h>
+
+ #define FAILED_FII ((uint32) -1)
+
+@@ -863,6 +864,21 @@ static enum TIFFReadDirEntryErr TIFFRead
+ datasize=(*count)*typesize;
+ assert((tmsize_t)datasize>0);
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of requested memory is not greater than file size.
++ */
++ uint64 filesize = TIFFGetFileSize(tif);
++ if (datasize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
++ "Requested memory size for tag %d (0x%x) %" PRIu32
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, tag not read",
++ direntry->tdir_tag, direntry->tdir_tag, datasize,
++ filesize);
++ return (TIFFReadDirEntryErrAlloc);
++ }
++
+ if( isMapped(tif) && datasize > (uint32)tif->tif_size )
+ return TIFFReadDirEntryErrIo;
+
+@@ -4534,6 +4550,20 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ if( !_TIFFFillStrilesInternal( tif, 0 ) )
+ return -1;
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of requested memory is not greater than file size. */
++ uint64 filesize = TIFFGetFileSize(tif);
++ uint64 allocsize = (uint64)td->td_nstrips * sizeof(uint64);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Requested memory size for StripByteCounts of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return -1;
++ }
++
+ if (td->td_stripbytecount_p)
+ _TIFFfree(td->td_stripbytecount_p);
+ td->td_stripbytecount_p = (uint64*)
+@@ -4544,9 +4574,7 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+
+ if (td->td_compression != COMPRESSION_NONE) {
+ uint64 space;
+- uint64 filesize;
+ uint16 n;
+- filesize = TIFFGetFileSize(tif);
+ if (!(tif->tif_flags&TIFF_BIGTIFF))
+ space=sizeof(TIFFHeaderClassic)+2+dircount*12+4;
+ else
+@@ -4854,6 +4882,20 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ dircount16 = (uint16)dircount64;
+ dirsize = 20;
+ }
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64 filesize = TIFFGetFileSize(tif);
++ uint64 allocsize = (uint64)dircount16 * dirsize;
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
+ if (origdir == NULL)
+@@ -4957,6 +4999,20 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ "Sanity check on directory count failed, zero tag directories not supported");
+ return 0;
+ }
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64 filesize = TIFFGetFileSize(tif);
++ uint64 allocsize = (uint64)dircount16 * dirsize;
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize,
+ "to read TIFF directory");
+@@ -5000,6 +5056,8 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ }
+ }
+ }
++ /* No check against filesize needed here because "dir" should have same size
++ * than "origdir" checked above. */
+ dir = (TIFFDirEntry*)_TIFFCheckMalloc(tif, dircount16,
+ sizeof(TIFFDirEntry),
+ "to read TIFF directory");
+@@ -5769,7 +5827,20 @@ TIFFFetchStripThing(TIFF* tif, TIFFDirEn
+ _TIFFfree(data);
+ return(0);
+ }
+-
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ uint64 filesize = TIFFGetFileSize(tif);
++ uint64 allocsize = (uint64)nstrips * sizeof(uint64);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, module,
++ "Requested memory size for StripArray of %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ _TIFFfree(data);
++ return (0);
++ }
+ resizeddata=(uint64*)_TIFFCheckMalloc(tif,nstrips,sizeof(uint64),"for strip array");
+ if (resizeddata==0) {
+ _TIFFfree(data);
+@@ -5865,6 +5936,23 @@ static void allocChoppedUpStripArrays(TI
+ }
+ bytecount = last_offset + last_bytecount - offset;
+
++ /* Before allocating a huge amount of memory for corrupted files, check if
++ * size of StripByteCount and StripOffset tags is not greater than
++ * file size.
++ */
++ uint64 allocsize = (uint64)nstrips * sizeof(uint64) * 2;
++ uint64 filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
++ "Requested memory size for StripByteCount and "
++ "StripOffsets %" PRIu64
++ " is greather than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return;
++ }
++
+ newcounts = (uint64*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64),
+ "for chopped \"StripByteCounts\" array");
+ newoffsets = (uint64*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64),
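
The recurring pattern in this patch: for data that ultimately has to be read from the file (tag payloads, strip offset/bytecount arrays, directories), any request larger than the file itself is necessarily bogus, so it is rejected before memory is allocated. A reduced sketch of the idea (alloc_from_file_bounded is a made-up helper; TIFFGetFileSize() supplies file_size in the real code):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Reject any allocation request that exceeds the size of the input
     * file; a few-KB fuzzed file can then no longer provoke a multi-GB
     * allocation through a forged count field. */
    static void *alloc_from_file_bounded(uint64_t request, uint64_t file_size,
                                         const char *what)
    {
        if (request > file_size) {
            fprintf(stderr,
                    "%s: %llu bytes requested, but the file is only %llu bytes\n",
                    what, (unsigned long long)request,
                    (unsigned long long)file_size);
            return NULL;
        }
        return malloc((size_t)request);
    }
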
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-2.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-2.patch
new file mode 100644
index 0000000000..644b3fdb3f
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-2.patch
@@ -0,0 +1,152 @@
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+ . using uint64 instead of uint64_t to preserve the current code usage;
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From 0b025324711213a75e38b52f7e7ba60235f108aa Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 19:47:22 +0100
+Subject: [PATCH] tif_dirread.c: only issue TIFFGetFileSize() for large enough
+ RAM requests
+
+Ammends 5320c9d89c054fa805d037d84c57da874470b01a
+
+This fixes a performance regression caught by the GDAL regression test
+suite.
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-6277-2.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/0b025324711213a75e38b52f7e7ba60235f108aa]
+CVE: CVE-2023-6277
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_dirread.c | 83 +++++++++++++++++++++++++------------------
+ 1 file changed, 48 insertions(+), 35 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -864,19 +864,22 @@ static enum TIFFReadDirEntryErr TIFFRead
+ datasize=(*count)*typesize;
+ assert((tmsize_t)datasize>0);
+
+- /* Before allocating a huge amount of memory for corrupted files, check if
+- * size of requested memory is not greater than file size.
+- */
+- uint64 filesize = TIFFGetFileSize(tif);
+- if (datasize > filesize)
++ if (datasize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
+- "Requested memory size for tag %d (0x%x) %" PRIu32
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated, tag not read",
+- direntry->tdir_tag, direntry->tdir_tag, datasize,
+- filesize);
+- return (TIFFReadDirEntryErrAlloc);
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size.
++ */
++ const uint64 filesize = TIFFGetFileSize(tif);
++ if (datasize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "ReadDirEntryArray",
++ "Requested memory size for tag %d (0x%x) %" PRIu32
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated, tag not read",
++ direntry->tdir_tag, direntry->tdir_tag, datasize,
++ filesize);
++ return (TIFFReadDirEntryErrAlloc);
++ }
+ }
+
+ if( isMapped(tif) && datasize > (uint32)tif->tif_size )
+@@ -4550,18 +4553,22 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ if( !_TIFFFillStrilesInternal( tif, 0 ) )
+ return -1;
+
+- /* Before allocating a huge amount of memory for corrupted files, check if
+- * size of requested memory is not greater than file size. */
+- uint64 filesize = TIFFGetFileSize(tif);
+- uint64 allocsize = (uint64)td->td_nstrips * sizeof(uint64);
+- if (allocsize > filesize)
++ const uint64 allocsize = (uint64)td->td_nstrips * sizeof(uint64);
++ uint64 filesize = 0;
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, module,
+- "Requested memory size for StripByteCounts of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- return -1;
++ /* Before allocating a huge amount of memory for corrupted files, check
++ * if size of requested memory is not greater than file size. */
++ filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for StripByteCounts of %" PRIu64
++ " is greater than filesize %" PRIu64 ". Memory not allocated",
++ allocsize, filesize);
++ return -1;
++ }
+ }
+
+ if (td->td_stripbytecount_p)
+@@ -4608,11 +4615,13 @@ EstimateStripByteCounts(TIFF* tif, TIFFD
+ return -1;
+ space+=datasize;
+ }
++ if (filesize == 0)
++ filesize = TIFFGetFileSize(tif);
+ if( filesize < space )
+- /* we should perhaps return in error ? */
+- space = filesize;
+- else
+- space = filesize - space;
++ /* we should perhaps return in error ? */
++ space = filesize;
++ else
++ space = filesize - space;
+ if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
+ space /= td->td_samplesperpixel;
+ for (strip = 0; strip < td->td_nstrips; strip++)
+@@ -4882,19 +4891,23 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ dircount16 = (uint16)dircount64;
+ dirsize = 20;
+ }
+- /* Before allocating a huge amount of memory for corrupted files, check
+- * if size of requested memory is not greater than file size. */
+- uint64 filesize = TIFFGetFileSize(tif);
+- uint64 allocsize = (uint64)dircount16 * dirsize;
+- if (allocsize > filesize)
++ const uint64 allocsize = (uint64)dircount16 * dirsize;
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(
+- tif->tif_clientdata, module,
+- "Requested memory size for TIFF directory of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated, TIFF directory not read",
+- allocsize, filesize);
+- return 0;
++ /* Before allocating a huge amount of memory for corrupted files,
++ * check if size of requested memory is not greater than file size.
++ */
++ const uint64 filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for TIFF directory of %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated, TIFF directory not read",
++ allocsize, filesize);
++ return 0;
++ }
+ }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-3.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-3.patch
new file mode 100644
index 0000000000..ed7d7e7b96
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-3.patch
@@ -0,0 +1,46 @@
+Backport of:
+
+From de7bfd7d4377c266f81849579f696fa1ad5ba6c3 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 20:13:45 +0100
+Subject: [PATCH] TIFFFetchDirectory(): remove useless allocsize vs filesize
+ check
+
+CoverityScan rightly points that the max value for dircount16 * dirsize
+is 4096 * 20. That's small enough not to do any check
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-6277-3.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/de7bfd7d4377c266f81849579f696fa1ad5ba6c3]
+CVE: CVE-2023-6277
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_dirread.c | 18 ------------------
+ 1 file changed, 18 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -4891,24 +4891,6 @@ TIFFFetchDirectory(TIFF* tif, uint64 dir
+ dircount16 = (uint16)dircount64;
+ dirsize = 20;
+ }
+- const uint64 allocsize = (uint64)dircount16 * dirsize;
+- if (allocsize > 100 * 1024 * 1024)
+- {
+- /* Before allocating a huge amount of memory for corrupted files,
+- * check if size of requested memory is not greater than file size.
+- */
+- const uint64 filesize = TIFFGetFileSize(tif);
+- if (allocsize > filesize)
+- {
+- TIFFWarningExt(
+- tif->tif_clientdata, module,
+- "Requested memory size for TIFF directory of %" PRIu64
+- " is greater than filesize %" PRIu64
+- ". Memory not allocated, TIFF directory not read",
+- allocsize, filesize);
+- return 0;
+- }
+- }
+ origdir = _TIFFCheckMalloc(tif, dircount16,
+ dirsize, "to read TIFF directory");
+ if (origdir == NULL)
diff --git a/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-4.patch b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-4.patch
new file mode 100644
index 0000000000..1a43fd3230
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/files/CVE-2023-6277-4.patch
@@ -0,0 +1,94 @@
+[Ubuntu note: Backport of the following patch from upstream, with a few changes
+to match the current version of the file in the present Ubuntu release:
+ . using TIFFWarningExt instead of TIFFWarningExtR (the latter did not exist yet);
+ . using uint64 instead of uint64_t to preserve the current code usage;
+ . calling _TIFFfree(data) instead of _TIFFfreeExt(tif, data) (the latter did not exist yet);
+-- Rodrigo Figueiredo Zaiden]
+
+Backport of:
+
+From dbb825a8312f30e63a06c272010967d51af5c35a Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Tue, 31 Oct 2023 21:30:58 +0100
+Subject: [PATCH] tif_dirread.c: only issue TIFFGetFileSize() for large enough
+ RAM requests
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/tiff/tree/debian/patches/CVE-2023-6277-4.patch?h=ubuntu/focal-security
+Upstream commit https://gitlab.com/libtiff/libtiff/-/commit/dbb825a8312f30e63a06c272010967d51af5c35a]
+CVE: CVE-2023-6277
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libtiff/tif_dirread.c | 54 +++++++++++++++++++++++++------------------
+ 1 file changed, 31 insertions(+), 23 deletions(-)
+
+--- tiff-4.1.0+git191117.orig/libtiff/tif_dirread.c
++++ tiff-4.1.0+git191117/libtiff/tif_dirread.c
+@@ -5822,19 +5822,24 @@ TIFFFetchStripThing(TIFF* tif, TIFFDirEn
+ _TIFFfree(data);
+ return(0);
+ }
+- /* Before allocating a huge amount of memory for corrupted files, check
+- * if size of requested memory is not greater than file size. */
+- uint64 filesize = TIFFGetFileSize(tif);
+- uint64 allocsize = (uint64)nstrips * sizeof(uint64);
+- if (allocsize > filesize)
++ const uint64 allocsize = (uint64)nstrips * sizeof(uint64);
++ if (allocsize > 100 * 1024 * 1024)
+ {
+- TIFFWarningExt(tif->tif_clientdata, module,
+- "Requested memory size for StripArray of %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- _TIFFfree(data);
+- return (0);
++ /* Before allocating a huge amount of memory for corrupted files,
++ * check if size of requested memory is not greater than file size.
++ */
++ const uint64 filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(
++ tif->tif_clientdata, module,
++ "Requested memory size for StripArray of %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ _TIFFfree(data);
++ return (0);
++ }
+ }
+ resizeddata=(uint64*)_TIFFCheckMalloc(tif,nstrips,sizeof(uint64),"for strip array");
+ if (resizeddata==0) {
+@@ -5935,17 +5940,20 @@ static void allocChoppedUpStripArrays(TI
+ * size of StripByteCount and StripOffset tags is not greater than
+ * file size.
+ */
+- uint64 allocsize = (uint64)nstrips * sizeof(uint64) * 2;
+- uint64 filesize = TIFFGetFileSize(tif);
+- if (allocsize > filesize)
+- {
+- TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
+- "Requested memory size for StripByteCount and "
+- "StripOffsets %" PRIu64
+- " is greather than filesize %" PRIu64
+- ". Memory not allocated",
+- allocsize, filesize);
+- return;
++ const uint64 allocsize = (uint64)nstrips * sizeof(uint64) * 2;
++ if (allocsize > 100 * 1024 * 1024)
++ {
++ const uint64 filesize = TIFFGetFileSize(tif);
++ if (allocsize > filesize)
++ {
++ TIFFWarningExt(tif->tif_clientdata, "allocChoppedUpStripArrays",
++ "Requested memory size for StripByteCount and "
++ "StripOffsets %" PRIu64
++ " is greater than filesize %" PRIu64
++ ". Memory not allocated",
++ allocsize, filesize);
++ return;
++ }
+ }
+
+ newcounts = (uint64*) _TIFFCheckMalloc(tif, nstrips, sizeof (uint64),
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch
new file mode 100644
index 0000000000..71b85cac10
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1354.patch
@@ -0,0 +1,212 @@
+From 87881e093691a35c60b91cafed058ba2dd5d9807 Mon Sep 17 00:00:00 2001
+From: Even Rouault <even.rouault@spatialys.com>
+Date: Sun, 5 Dec 2021 14:37:46 +0100
+Subject: [PATCH] TIFFReadDirectory: fix OJPEG hack (fixes #319)
+
+to avoid having the size of the strip arrays inconsistent with the
+number of strips returned by TIFFNumberOfStrips(), which may cause
+out-of-bounds array read afterwards.
+
+One of the OJPEG hacks that alters SamplesPerPixel may influence the
+number of strips. Hence compute tif_dir.td_nstrips only afterwards.
+
+CVE: CVE-2022-1354
+
+Upstream-Status: Backport
+[https://gitlab.com/libtiff/libtiff/-/commit/87f580f39011109b3bb5f6eca13fac543a542798]
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+---
+ libtiff/tif_dirread.c | 162 ++++++++++++++++++++++--------------------
+ 1 file changed, 83 insertions(+), 79 deletions(-)
+
+diff --git a/libtiff/tif_dirread.c b/libtiff/tif_dirread.c
+index 8f434ef5..14c031d1 100644
+--- a/libtiff/tif_dirread.c
++++ b/libtiff/tif_dirread.c
+@@ -3794,50 +3794,7 @@ TIFFReadDirectory(TIFF* tif)
+ MissingRequired(tif,"ImageLength");
+ goto bad;
+ }
+- /*
+- * Setup appropriate structures (by strip or by tile)
+- */
+- if (!TIFFFieldSet(tif, FIELD_TILEDIMENSIONS)) {
+- tif->tif_dir.td_nstrips = TIFFNumberOfStrips(tif);
+- tif->tif_dir.td_tilewidth = tif->tif_dir.td_imagewidth;
+- tif->tif_dir.td_tilelength = tif->tif_dir.td_rowsperstrip;
+- tif->tif_dir.td_tiledepth = tif->tif_dir.td_imagedepth;
+- tif->tif_flags &= ~TIFF_ISTILED;
+- } else {
+- tif->tif_dir.td_nstrips = TIFFNumberOfTiles(tif);
+- tif->tif_flags |= TIFF_ISTILED;
+- }
+- if (!tif->tif_dir.td_nstrips) {
+- TIFFErrorExt(tif->tif_clientdata, module,
+- "Cannot handle zero number of %s",
+- isTiled(tif) ? "tiles" : "strips");
+- goto bad;
+- }
+- tif->tif_dir.td_stripsperimage = tif->tif_dir.td_nstrips;
+- if (tif->tif_dir.td_planarconfig == PLANARCONFIG_SEPARATE)
+- tif->tif_dir.td_stripsperimage /= tif->tif_dir.td_samplesperpixel;
+- if (!TIFFFieldSet(tif, FIELD_STRIPOFFSETS)) {
+-#ifdef OJPEG_SUPPORT
+- if ((tif->tif_dir.td_compression==COMPRESSION_OJPEG) &&
+- (isTiled(tif)==0) &&
+- (tif->tif_dir.td_nstrips==1)) {
+- /*
+- * XXX: OJPEG hack.
+- * If a) compression is OJPEG, b) it's not a tiled TIFF,
+- * and c) the number of strips is 1,
+- * then we tolerate the absence of stripoffsets tag,
+- * because, presumably, all required data is in the
+- * JpegInterchangeFormat stream.
+- */
+- TIFFSetFieldBit(tif, FIELD_STRIPOFFSETS);
+- } else
+-#endif
+- {
+- MissingRequired(tif,
+- isTiled(tif) ? "TileOffsets" : "StripOffsets");
+- goto bad;
+- }
+- }
++
+ /*
+ * Second pass: extract other information.
+ */
+@@ -4042,41 +3999,6 @@ TIFFReadDirectory(TIFF* tif)
+ } /* -- if (!dp->tdir_ignore) */
+ } /* -- for-loop -- */
+
+- if( tif->tif_mode == O_RDWR &&
+- tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_count == 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_type == 0 &&
+- tif->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8 == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_count == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_type == 0 &&
+- tif->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8 == 0 )
+- {
+- /* Directory typically created with TIFFDeferStrileArrayWriting() */
+- TIFFSetupStrips(tif);
+- }
+- else if( !(tif->tif_flags&TIFF_DEFERSTRILELOAD) )
+- {
+- if( tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 )
+- {
+- if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripoffset_entry),
+- tif->tif_dir.td_nstrips,
+- &tif->tif_dir.td_stripoffset_p))
+- {
+- goto bad;
+- }
+- }
+- if( tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 )
+- {
+- if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripbytecount_entry),
+- tif->tif_dir.td_nstrips,
+- &tif->tif_dir.td_stripbytecount_p))
+- {
+- goto bad;
+- }
+- }
+- }
+-
+ /*
+ * OJPEG hack:
+ * - If a) compression is OJPEG, and b) photometric tag is missing,
+@@ -4147,6 +4069,88 @@ TIFFReadDirectory(TIFF* tif)
+ }
+ }
+
++ /*
++ * Setup appropriate structures (by strip or by tile)
++ * We do that only after the above OJPEG hack which alters SamplesPerPixel
++ * and thus influences the number of strips in the separate planarconfig.
++ */
++ if (!TIFFFieldSet(tif, FIELD_TILEDIMENSIONS)) {
++ tif->tif_dir.td_nstrips = TIFFNumberOfStrips(tif);
++ tif->tif_dir.td_tilewidth = tif->tif_dir.td_imagewidth;
++ tif->tif_dir.td_tilelength = tif->tif_dir.td_rowsperstrip;
++ tif->tif_dir.td_tiledepth = tif->tif_dir.td_imagedepth;
++ tif->tif_flags &= ~TIFF_ISTILED;
++ } else {
++ tif->tif_dir.td_nstrips = TIFFNumberOfTiles(tif);
++ tif->tif_flags |= TIFF_ISTILED;
++ }
++ if (!tif->tif_dir.td_nstrips) {
++ TIFFErrorExt(tif->tif_clientdata, module,
++ "Cannot handle zero number of %s",
++ isTiled(tif) ? "tiles" : "strips");
++ goto bad;
++ }
++ tif->tif_dir.td_stripsperimage = tif->tif_dir.td_nstrips;
++ if (tif->tif_dir.td_planarconfig == PLANARCONFIG_SEPARATE)
++ tif->tif_dir.td_stripsperimage /= tif->tif_dir.td_samplesperpixel;
++ if (!TIFFFieldSet(tif, FIELD_STRIPOFFSETS)) {
++#ifdef OJPEG_SUPPORT
++ if ((tif->tif_dir.td_compression==COMPRESSION_OJPEG) &&
++ (isTiled(tif)==0) &&
++ (tif->tif_dir.td_nstrips==1)) {
++ /*
++ * XXX: OJPEG hack.
++ * If a) compression is OJPEG, b) it's not a tiled TIFF,
++ * and c) the number of strips is 1,
++ * then we tolerate the absence of stripoffsets tag,
++ * because, presumably, all required data is in the
++ * JpegInterchangeFormat stream.
++ */
++ TIFFSetFieldBit(tif, FIELD_STRIPOFFSETS);
++ } else
++#endif
++ {
++ MissingRequired(tif,
++ isTiled(tif) ? "TileOffsets" : "StripOffsets");
++ goto bad;
++ }
++ }
++
++ if( tif->tif_mode == O_RDWR &&
++ tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_count == 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_type == 0 &&
++ tif->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8 == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_count == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_type == 0 &&
++ tif->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8 == 0 )
++ {
++ /* Directory typically created with TIFFDeferStrileArrayWriting() */
++ TIFFSetupStrips(tif);
++ }
++ else if( !(tif->tif_flags&TIFF_DEFERSTRILELOAD) )
++ {
++ if( tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 )
++ {
++ if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripoffset_entry),
++ tif->tif_dir.td_nstrips,
++ &tif->tif_dir.td_stripoffset_p))
++ {
++ goto bad;
++ }
++ }
++ if( tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 )
++ {
++ if (!TIFFFetchStripThing(tif,&(tif->tif_dir.td_stripbytecount_entry),
++ tif->tif_dir.td_nstrips,
++ &tif->tif_dir.td_stripbytecount_p))
++ {
++ goto bad;
++ }
++ }
++ }
++
+ /*
+ * Make sure all non-color channels are extrasamples.
+ * If it's not the case, define them as such.
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch
new file mode 100644
index 0000000000..e59f5aad55
--- /dev/null
+++ b/meta/recipes-multimedia/libtiff/tiff/CVE-2022-1355.patch
@@ -0,0 +1,62 @@
+From fb1db384959698edd6caeea84e28253d272a0f96 Mon Sep 17 00:00:00 2001
+From: Su_Laus <sulau@freenet.de>
+Date: Sat, 2 Apr 2022 22:33:31 +0200
+Subject: [PATCH] tiffcp: avoid buffer overflow in "mode" string (fixes #400)
+
+CVE: CVE-2022-1355
+
+Upstream-Status: Backport
+[https://gitlab.com/libtiff/libtiff/-/commit/c1ae29f9ebacd29b7c3e0c7db671af7db3584bc2]
+
+Signed-off-by: Yi Zhao <yi.zhao@windriver.com>
+---
+ tools/tiffcp.c | 25 ++++++++++++++++++++-----
+ 1 file changed, 20 insertions(+), 5 deletions(-)
+
+diff --git a/tools/tiffcp.c b/tools/tiffcp.c
+index fd129bb7..8d944ff6 100644
+--- a/tools/tiffcp.c
++++ b/tools/tiffcp.c
+@@ -274,19 +274,34 @@ main(int argc, char* argv[])
+ deftilewidth = atoi(optarg);
+ break;
+ case 'B':
+- *mp++ = 'b'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'b'; *mp = '\0';
++ }
+ break;
+ case 'L':
+- *mp++ = 'l'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'l'; *mp = '\0';
++ }
+ break;
+ case 'M':
+- *mp++ = 'm'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'm'; *mp = '\0';
++ }
+ break;
+ case 'C':
+- *mp++ = 'c'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode) - 1))
++ {
++ *mp++ = 'c'; *mp = '\0';
++ }
+ break;
+ case '8':
+- *mp++ = '8'; *mp = '\0';
++ if (strlen(mode) < (sizeof(mode)-1))
++ {
++ *mp++ = '8'; *mp = '\0';
++ }
+ break;
+ case 'x':
+ pageInSeq = 1;
+--
+2.25.1
+
diff --git a/meta/recipes-multimedia/libtiff/tiff_4.1.0.bb b/meta/recipes-multimedia/libtiff/tiff_4.1.0.bb
index e3ffb12f9e..7efaba3a38 100644
--- a/meta/recipes-multimedia/libtiff/tiff_4.1.0.bb
+++ b/meta/recipes-multimedia/libtiff/tiff_4.1.0.bb
@@ -18,7 +18,42 @@ SRC_URI = "http://download.osgeo.org/libtiff/tiff-${PV}.tar.gz \
file://0001-tiffset-fix-global-buffer-overflow-for-ASCII-tags-wh.patch \
file://561599c99f987dc32ae110370cfdd7df7975586b.patch \
file://eecb0712f4c3a5b449f70c57988260a667ddbdef.patch \
+ file://CVE-2022-0865.patch \
+ file://CVE-2022-0908.patch \
+ file://CVE-2022-0907.patch \
+ file://CVE-2022-0909.patch \
file://CVE-2022-0891.patch \
+ file://CVE-2022-0924.patch \
+ file://CVE-2022-2056-CVE-2022-2057-CVE-2022-2058.patch \
+ file://CVE-2022-34526.patch \
+ file://CVE-2022-2867-CVE-2022-2868-CVE-2022-2869.patch \
+ file://CVE-2022-1354.patch \
+ file://CVE-2022-1355.patch \
+ file://CVE-2022-3570_3598.patch \
+ file://CVE-2022-3597_3626_3627.patch \
+ file://CVE-2022-3599.patch \
+ file://CVE-2022-3970.patch \
+ file://CVE-2022-48281.patch \
+ file://CVE-2023-0795_0796_0797_0798_0799.patch \
+ file://CVE-2023-0800_0801_0802_0803_0804.patch \
+ file://CVE-2023-1916.patch \
+ file://CVE-2023-25433.patch \
+ file://CVE-2023-25434-CVE-2023-25435.patch \
+ file://CVE-2023-26965.patch \
+ file://CVE-2023-26966.patch \
+ file://CVE-2023-2908.patch \
+ file://CVE-2023-3316.patch \
+ file://CVE-2023-3576.patch \
+ file://CVE-2023-3618.patch \
+ file://CVE-2023-40745.patch \
+ file://CVE-2023-41175.patch \
+ file://CVE-2022-40090.patch \
+ file://CVE-2023-6228.patch \
+ file://CVE-2023-6277-1.patch \
+ file://CVE-2023-6277-2.patch \
+ file://CVE-2023-6277-3.patch \
+ file://CVE-2023-6277-4.patch \
+ file://CVE-2023-52356.patch \
"
SRC_URI[md5sum] = "2165e7aba557463acc0664e71a3ed424"
SRC_URI[sha256sum] = "5d29f32517dadb6dbcd1255ea5bbc93a2b54b94fbf83653b4d65c7d6775b8634"
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch b/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch
new file mode 100644
index 0000000000..d293ab93ab
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-1999.patch
@@ -0,0 +1,55 @@
+From a486d800b60d0af4cc0836bf7ed8f21e12974129 Mon Sep 17 00:00:00 2001
+From: James Zern <jzern@google.com>
+Date: Wed, 22 Feb 2023 22:15:47 -0800
+Subject: [PATCH] EncodeAlphaInternal: clear result->bw on error
+
+This avoids a double free should the function fail prior to
+VP8BitWriterInit() and a previous trial result's buffer carried over.
+Previously in ApplyFiltersAndEncode() trial.bw (with a previous
+iteration's buffer) would be freed, followed by best.bw pointing to the
+same buffer.
+
+Since:
+187d379d add a fallback to ALPHA_NO_COMPRESSION
+
+In addition, check the return value of VP8BitWriterInit() in this
+function.
+
+Bug: webp:603
+Change-Id: Ic258381ee26c8c16bc211d157c8153831c8c6910
+
+CVE: CVE-2023-1999
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/a486d800b60d0af4cc0836bf7ed8f21e12974129]
+Signed-off-by: Nikhil R <nikhil.r@kpit.com>
+---
+ src/enc/alpha_enc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/enc/alpha_enc.c b/src/enc/alpha_enc.c
+index f7c02690e3..7d205586fe 100644
+--- a/src/enc/alpha_enc.c
++++ b/src/enc/alpha_enc.c
+@@ -13,6 +13,7 @@
+
+ #include <assert.h>
+ #include <stdlib.h>
++#include <string.h>
+
+ #include "src/enc/vp8i_enc.h"
+ #include "src/dsp/dsp.h"
+@@ -148,6 +149,7 @@ static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
+ }
+ } else {
+ VP8LBitWriterWipeOut(&tmp_bw);
++ memset(&result->bw, 0, sizeof(result->bw));
+ return 0;
+ }
+ }
+@@ -162,7 +164,7 @@ static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
+ header = method | (filter << 2);
+ if (reduce_levels) header |= ALPHA_PREPROCESSED_LEVELS << 4;
+
+- VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size);
++ if (!VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size)) ok = 0;
+ ok = ok && VP8BitWriterAppend(&result->bw, &header, ALPHA_HEADER_LEN);
+ ok = ok && VP8BitWriterAppend(&result->bw, output, output_size);
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch
new file mode 100644
index 0000000000..419b12f7d9
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0001.patch
@@ -0,0 +1,366 @@
+From 902bc9190331343b2017211debcec8d2ab87e17a Mon Sep 17 00:00:00 2001
+From: Vincent Rabaud <vrabaud@google.com>
+Date: Thu, 7 Sep 2023 21:16:03 +0200
+Subject: [PATCH 1/2] Fix OOB write in BuildHuffmanTable.
+
+First, BuildHuffmanTable is called to check if the data is valid.
+If it is and the table is not big enough, more memory is allocated.
+
+This will make sure that valid (but unoptimized because of unbalanced
+codes) streams are still decodable.
+
+Bug: chromium:1479274
+Change-Id: I31c36dbf3aa78d35ecf38706b50464fd3d375741
+
+CVE: CVE-2023-4863
+
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/902bc9190331343b2017211debcec8d2ab87e17a]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/dec/vp8l_dec.c | 46 ++++++++++---------
+ src/dec/vp8li_dec.h | 2 +-
+ src/utils/huffman_utils.c | 97 +++++++++++++++++++++++++++++++--------
+ src/utils/huffman_utils.h | 27 +++++++++--
+ 4 files changed, 129 insertions(+), 43 deletions(-)
+
+diff --git a/src/dec/vp8l_dec.c b/src/dec/vp8l_dec.c
+index 93615d4..0d38314 100644
+--- a/src/dec/vp8l_dec.c
++++ b/src/dec/vp8l_dec.c
+@@ -253,11 +253,11 @@ static int ReadHuffmanCodeLengths(
+ int symbol;
+ int max_symbol;
+ int prev_code_len = DEFAULT_CODE_LENGTH;
+- HuffmanCode table[1 << LENGTHS_TABLE_BITS];
++ HuffmanTables tables;
+
+- if (!VP8LBuildHuffmanTable(table, LENGTHS_TABLE_BITS,
+- code_length_code_lengths,
+- NUM_CODE_LENGTH_CODES)) {
++ if (!VP8LHuffmanTablesAllocate(1 << LENGTHS_TABLE_BITS, &tables) ||
++ !VP8LBuildHuffmanTable(&tables, LENGTHS_TABLE_BITS,
++ code_length_code_lengths, NUM_CODE_LENGTH_CODES)) {
+ goto End;
+ }
+
+@@ -277,7 +277,7 @@ static int ReadHuffmanCodeLengths(
+ int code_len;
+ if (max_symbol-- == 0) break;
+ VP8LFillBitWindow(br);
+- p = &table[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK];
++ p = &tables.curr_segment->start[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK];
+ VP8LSetBitPos(br, br->bit_pos_ + p->bits);
+ code_len = p->value;
+ if (code_len < kCodeLengthLiterals) {
+@@ -300,6 +300,7 @@ static int ReadHuffmanCodeLengths(
+ ok = 1;
+
+ End:
++ VP8LHuffmanTablesDeallocate(&tables);
+ if (!ok) dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
+ return ok;
+ }
+@@ -307,7 +308,8 @@ static int ReadHuffmanCodeLengths(
+ // 'code_lengths' is pre-allocated temporary buffer, used for creating Huffman
+ // tree.
+ static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec,
+- int* const code_lengths, HuffmanCode* const table) {
++ int* const code_lengths,
++ HuffmanTables* const table) {
+ int ok = 0;
+ int size = 0;
+ VP8LBitReader* const br = &dec->br_;
+@@ -362,8 +364,7 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ VP8LMetadata* const hdr = &dec->hdr_;
+ uint32_t* huffman_image = NULL;
+ HTreeGroup* htree_groups = NULL;
+- HuffmanCode* huffman_tables = NULL;
+- HuffmanCode* huffman_table = NULL;
++ HuffmanTables* huffman_tables = &hdr->huffman_tables_;
+ int num_htree_groups = 1;
+ int num_htree_groups_max = 1;
+ int max_alphabet_size = 0;
+@@ -372,6 +373,10 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ int* mapping = NULL;
+ int ok = 0;
+
++ // Check the table has been 0 initialized (through InitMetadata).
++ assert(huffman_tables->root.start == NULL);
++ assert(huffman_tables->curr_segment == NULL);
++
+ if (allow_recursion && VP8LReadBits(br, 1)) {
+ // use meta Huffman codes.
+ const int huffman_precision = VP8LReadBits(br, 3) + 2;
+@@ -434,16 +439,15 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+
+ code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size,
+ sizeof(*code_lengths));
+- huffman_tables = (HuffmanCode*)WebPSafeMalloc(num_htree_groups * table_size,
+- sizeof(*huffman_tables));
+ htree_groups = VP8LHtreeGroupsNew(num_htree_groups);
+
+- if (htree_groups == NULL || code_lengths == NULL || huffman_tables == NULL) {
++ if (htree_groups == NULL || code_lengths == NULL ||
++ !VP8LHuffmanTablesAllocate(num_htree_groups * table_size,
++ huffman_tables)) {
+ dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
+ goto Error;
+ }
+
+- huffman_table = huffman_tables;
+ for (i = 0; i < num_htree_groups_max; ++i) {
+ // If the index "i" is unused in the Huffman image, just make sure the
+ // coefficients are valid but do not store them.
+@@ -468,19 +472,20 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ int max_bits = 0;
+ for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
+ int alphabet_size = kAlphabetSize[j];
+- htrees[j] = huffman_table;
+ if (j == 0 && color_cache_bits > 0) {
+ alphabet_size += (1 << color_cache_bits);
+ }
+- size = ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_table);
++ size =
++ ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_tables);
++ htrees[j] = huffman_tables->curr_segment->curr_table;
+ if (size == 0) {
+ goto Error;
+ }
+ if (is_trivial_literal && kLiteralMap[j] == 1) {
+- is_trivial_literal = (huffman_table->bits == 0);
++ is_trivial_literal = (htrees[j]->bits == 0);
+ }
+- total_size += huffman_table->bits;
+- huffman_table += size;
++ total_size += htrees[j]->bits;
++ huffman_tables->curr_segment->curr_table += size;
+ if (j <= ALPHA) {
+ int local_max_bits = code_lengths[0];
+ int k;
+@@ -515,14 +520,13 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
+ hdr->huffman_image_ = huffman_image;
+ hdr->num_htree_groups_ = num_htree_groups;
+ hdr->htree_groups_ = htree_groups;
+- hdr->huffman_tables_ = huffman_tables;
+
+ Error:
+ WebPSafeFree(code_lengths);
+ WebPSafeFree(mapping);
+ if (!ok) {
+ WebPSafeFree(huffman_image);
+- WebPSafeFree(huffman_tables);
++ VP8LHuffmanTablesDeallocate(huffman_tables);
+ VP8LHtreeGroupsFree(htree_groups);
+ }
+ return ok;
+@@ -1354,7 +1358,7 @@ static void ClearMetadata(VP8LMetadata* const hdr) {
+ assert(hdr != NULL);
+
+ WebPSafeFree(hdr->huffman_image_);
+- WebPSafeFree(hdr->huffman_tables_);
++ VP8LHuffmanTablesDeallocate(&hdr->huffman_tables_);
+ VP8LHtreeGroupsFree(hdr->htree_groups_);
+ VP8LColorCacheClear(&hdr->color_cache_);
+ VP8LColorCacheClear(&hdr->saved_color_cache_);
+@@ -1670,7 +1674,7 @@ int VP8LDecodeImage(VP8LDecoder* const dec) {
+ // Sanity checks.
+ if (dec == NULL) return 0;
+
+- assert(dec->hdr_.huffman_tables_ != NULL);
++ assert(dec->hdr_.huffman_tables_.root.start != NULL);
+ assert(dec->hdr_.htree_groups_ != NULL);
+ assert(dec->hdr_.num_htree_groups_ > 0);
+
+diff --git a/src/dec/vp8li_dec.h b/src/dec/vp8li_dec.h
+index 72b2e86..32540a4 100644
+--- a/src/dec/vp8li_dec.h
++++ b/src/dec/vp8li_dec.h
+@@ -51,7 +51,7 @@ typedef struct {
+ uint32_t* huffman_image_;
+ int num_htree_groups_;
+ HTreeGroup* htree_groups_;
+- HuffmanCode* huffman_tables_;
++ HuffmanTables huffman_tables_;
+ } VP8LMetadata;
+
+ typedef struct VP8LDecoder VP8LDecoder;
+diff --git a/src/utils/huffman_utils.c b/src/utils/huffman_utils.c
+index 0cba0fb..9efd628 100644
+--- a/src/utils/huffman_utils.c
++++ b/src/utils/huffman_utils.c
+@@ -177,21 +177,24 @@ static int BuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+ if (num_open < 0) {
+ return 0;
+ }
+- if (root_table == NULL) continue;
+ for (; count[len] > 0; --count[len]) {
+ HuffmanCode code;
+ if ((key & mask) != low) {
+- table += table_size;
++ if (root_table != NULL) table += table_size;
+ table_bits = NextTableBitSize(count, len, root_bits);
+ table_size = 1 << table_bits;
+ total_size += table_size;
+ low = key & mask;
+- root_table[low].bits = (uint8_t)(table_bits + root_bits);
+- root_table[low].value = (uint16_t)((table - root_table) - low);
++ if (root_table != NULL) {
++ root_table[low].bits = (uint8_t)(table_bits + root_bits);
++ root_table[low].value = (uint16_t)((table - root_table) - low);
++ }
++ }
++ if (root_table != NULL) {
++ code.bits = (uint8_t)(len - root_bits);
++ code.value = (uint16_t)sorted[symbol++];
++ ReplicateValue(&table[key >> root_bits], step, table_size, code);
+ }
+- code.bits = (uint8_t)(len - root_bits);
+- code.value = (uint16_t)sorted[symbol++];
+- ReplicateValue(&table[key >> root_bits], step, table_size, code);
+ key = GetNextKey(key, len);
+ }
+ }
+@@ -211,25 +214,83 @@ static int BuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+ ((1 << MAX_CACHE_BITS) + NUM_LITERAL_CODES + NUM_LENGTH_CODES)
+ // Cut-off value for switching between heap and stack allocation.
+ #define SORTED_SIZE_CUTOFF 512
+-int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
++int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
+ const int code_lengths[], int code_lengths_size) {
+- int total_size;
++ const int total_size =
++ BuildHuffmanTable(NULL, root_bits, code_lengths, code_lengths_size, NULL);
+ assert(code_lengths_size <= MAX_CODE_LENGTHS_SIZE);
+- if (root_table == NULL) {
+- total_size = BuildHuffmanTable(NULL, root_bits,
+- code_lengths, code_lengths_size, NULL);
+- } else if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
++ if (total_size == 0 || root_table == NULL) return total_size;
++
++ if (root_table->curr_segment->curr_table + total_size >=
++ root_table->curr_segment->start + root_table->curr_segment->size) {
++ // If 'root_table' does not have enough memory, allocate a new segment.
++ // The available part of root_table->curr_segment is left unused because we
++ // need a contiguous buffer.
++ const int segment_size = root_table->curr_segment->size;
++ struct HuffmanTablesSegment* next =
++ (HuffmanTablesSegment*)WebPSafeMalloc(1, sizeof(*next));
++ if (next == NULL) return 0;
++ // Fill the new segment.
++ // We need at least 'total_size' but if that value is small, it is better to
++ // allocate a big chunk to prevent more allocations later. 'segment_size' is
++ // therefore chosen (any other arbitrary value could be chosen).
++ next->size = total_size > segment_size ? total_size : segment_size;
++ next->start =
++ (HuffmanCode*)WebPSafeMalloc(next->size, sizeof(*next->start));
++ if (next->start == NULL) {
++ WebPSafeFree(next);
++ return 0;
++ }
++ next->curr_table = next->start;
++ next->next = NULL;
++ // Point to the new segment.
++ root_table->curr_segment->next = next;
++ root_table->curr_segment = next;
++ }
++ if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
+ // use local stack-allocated array.
+ uint16_t sorted[SORTED_SIZE_CUTOFF];
+- total_size = BuildHuffmanTable(root_table, root_bits,
+- code_lengths, code_lengths_size, sorted);
+- } else { // rare case. Use heap allocation.
++ BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
++ code_lengths, code_lengths_size, sorted);
++ } else { // rare case. Use heap allocation.
+ uint16_t* const sorted =
+ (uint16_t*)WebPSafeMalloc(code_lengths_size, sizeof(*sorted));
+ if (sorted == NULL) return 0;
+- total_size = BuildHuffmanTable(root_table, root_bits,
+- code_lengths, code_lengths_size, sorted);
++ BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
++ code_lengths, code_lengths_size, sorted);
+ WebPSafeFree(sorted);
+ }
+ return total_size;
+ }
++
++int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables) {
++ // Have 'segment' point to the first segment for now, 'root'.
++ HuffmanTablesSegment* const root = &huffman_tables->root;
++ huffman_tables->curr_segment = root;
++ // Allocate root.
++ root->start = (HuffmanCode*)WebPSafeMalloc(size, sizeof(*root->start));
++ if (root->start == NULL) return 0;
++ root->curr_table = root->start;
++ root->next = NULL;
++ root->size = size;
++ return 1;
++}
++
++void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables) {
++ HuffmanTablesSegment *current, *next;
++ if (huffman_tables == NULL) return;
++ // Free the root node.
++ current = &huffman_tables->root;
++ next = current->next;
++ WebPSafeFree(current->start);
++ current->start = NULL;
++ current->next = NULL;
++ current = next;
++ // Free the following nodes.
++ while (current != NULL) {
++ next = current->next;
++ WebPSafeFree(current->start);
++ WebPSafeFree(current);
++ current = next;
++ }
++}
+diff --git a/src/utils/huffman_utils.h b/src/utils/huffman_utils.h
+index 13b7ad1..98415c5 100644
+--- a/src/utils/huffman_utils.h
++++ b/src/utils/huffman_utils.h
+@@ -43,6 +43,29 @@ typedef struct {
+ // or non-literal symbol otherwise
+ } HuffmanCode32;
+
++// Contiguous memory segment of HuffmanCodes.
++typedef struct HuffmanTablesSegment {
++ HuffmanCode* start;
++ // Pointer to where we are writing into the segment. Starts at 'start' and
++ // cannot go beyond 'start' + 'size'.
++ HuffmanCode* curr_table;
++ // Pointer to the next segment in the chain.
++ struct HuffmanTablesSegment* next;
++ int size;
++} HuffmanTablesSegment;
++
++// Chained memory segments of HuffmanCodes.
++typedef struct HuffmanTables {
++ HuffmanTablesSegment root;
++ // Currently processed segment. At first, this is 'root'.
++ HuffmanTablesSegment* curr_segment;
++} HuffmanTables;
++
++// Allocates a HuffmanTables with 'size' contiguous HuffmanCodes. Returns 0 on
++// memory allocation error, 1 otherwise.
++int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables);
++void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables);
++
+ #define HUFFMAN_PACKED_BITS 6
+ #define HUFFMAN_PACKED_TABLE_SIZE (1u << HUFFMAN_PACKED_BITS)
+
+@@ -78,9 +101,7 @@ void VP8LHtreeGroupsFree(HTreeGroup* const htree_groups);
+ // the huffman table.
+ // Returns built table size or 0 in case of error (invalid tree or
+ // memory error).
+-// If root_table is NULL, it returns 0 if a lookup cannot be built, something
+-// > 0 otherwise (but not the table size).
+-int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
++int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
+ const int code_lengths[], int code_lengths_size);
+
+ #ifdef __cplusplus
+--
+2.40.0
+
diff --git a/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch
new file mode 100644
index 0000000000..c1eedb6100
--- /dev/null
+++ b/meta/recipes-multimedia/webp/files/CVE-2023-4863-0002.patch
@@ -0,0 +1,53 @@
+From 95ea5226c870449522240ccff26f0b006037c520 Mon Sep 17 00:00:00 2001
+From: Vincent Rabaud <vrabaud@google.com>
+Date: Mon, 11 Sep 2023 16:06:08 +0200
+Subject: [PATCH 2/2] Fix invalid incremental decoding check.
+
+The first condition is only necessary if we have not read enough
+(enough being defined by src_last, not src_end which is the end
+of the image).
+The second condition now fits the comment below: "if not
+incremental, and we are past the end of buffer".
+
+BUG=oss-fuzz:62136
+
+Change-Id: I0700f67c62db8e1c02c2e429a069a71e606a5e4f
+
+CVE: CVE-2023-4863
+
+Upstream-Status: Backport [https://github.com/webmproject/libwebp/commit/95ea5226c870449522240ccff26f0b006037c520]
+
+Signed-off-by: Soumya Sambu <soumya.sambu@windriver.com>
+---
+ src/dec/vp8l_dec.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/src/dec/vp8l_dec.c b/src/dec/vp8l_dec.c
+index 0d38314..684a5b6 100644
+--- a/src/dec/vp8l_dec.c
++++ b/src/dec/vp8l_dec.c
+@@ -1237,9 +1237,20 @@ static int DecodeImageData(VP8LDecoder* const dec, uint32_t* const data,
+ }
+
+ br->eos_ = VP8LIsEndOfStream(br);
+- if (dec->incremental_ && br->eos_ && src < src_end) {
++ // In incremental decoding:
++ // br->eos_ && src < src_last: if 'br' reached the end of the buffer and
++ // 'src_last' has not been reached yet, there is not enough data. 'dec' has to
++ // be reset until there is more data.
++ // !br->eos_ && src < src_last: this cannot happen as either the buffer is
++ // fully read, either enough has been read to reach 'src_last'.
++ // src >= src_last: 'src_last' is reached, all is fine. 'src' can actually go
++ // beyond 'src_last' in case the image is cropped and an LZ77 goes further.
++ // The buffer might have been enough or there is some left. 'br->eos_' does
++ // not matter.
++ assert(!dec->incremental_ || (br->eos_ && src < src_last) || src >= src_last);
++ if (dec->incremental_ && br->eos_ && src < src_last) {
+ RestoreState(dec);
+- } else if (!br->eos_) {
++ } else if ((dec->incremental_ && src >= src_last) || !br->eos_) {
+ // Process the remaining rows corresponding to last row-block.
+ if (process_func != NULL) {
+ process_func(dec, row > last_row ? last_row : row);
+--
+2.40.0
diff --git a/meta/recipes-multimedia/webp/libwebp_1.1.0.bb b/meta/recipes-multimedia/webp/libwebp_1.1.0.bb
index 68e5ae2b3c..88c36cb76c 100644
--- a/meta/recipes-multimedia/webp/libwebp_1.1.0.bb
+++ b/meta/recipes-multimedia/webp/libwebp_1.1.0.bb
@@ -19,6 +19,12 @@ SRC_URI[sha256sum] = "98a052268cc4d5ece27f76572a7f50293f439c17a98e67c4ea0c7ed6f5
UPSTREAM_CHECK_URI = "http://downloads.webmproject.org/releases/webp/index.html"
+SRC_URI += " \
+ file://CVE-2023-1999.patch \
+ file://CVE-2023-4863-0001.patch \
+ file://CVE-2023-4863-0002.patch \
+"
+
EXTRA_OECONF = " \
--disable-wic \
--enable-libwebpmux \
diff --git a/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch b/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch
deleted file mode 100644
index 57e7453312..0000000000
--- a/meta/recipes-support/apr/apr-util/0001-Fix-error-handling-in-gdbm.patch
+++ /dev/null
@@ -1,135 +0,0 @@
-From 6b638fa9afbeb54dfa19378e391465a5284ce1ad Mon Sep 17 00:00:00 2001
-From: Changqing Li <changqing.li@windriver.com>
-Date: Wed, 12 Sep 2018 17:16:36 +0800
-Subject: [PATCH] Fix error handling in gdbm
-
-Only check for gdbm_errno if the return value of the called gdbm_*
-function says so. This fixes apr-util with gdbm 1.14, which does not
-seem to always reset gdbm_errno.
-
-Also make the gdbm driver return error codes starting with
-APR_OS_START_USEERR instead of always returning APR_EGENERAL. This is
-what the berkleydb driver already does.
-
-Also ensure that dsize is 0 if dptr == NULL.
-
-Upstream-Status: Backport[https://svn.apache.org/viewvc?
-view=revision&amp;revision=1825311]
-
-Signed-off-by: Changqing Li <changqing.li@windriver.com>
----
- dbm/apr_dbm_gdbm.c | 47 +++++++++++++++++++++++++++++------------------
- 1 file changed, 29 insertions(+), 18 deletions(-)
-
-diff --git a/dbm/apr_dbm_gdbm.c b/dbm/apr_dbm_gdbm.c
-index 749447a..1c86327 100644
---- a/dbm/apr_dbm_gdbm.c
-+++ b/dbm/apr_dbm_gdbm.c
-@@ -36,13 +36,25 @@
- static apr_status_t g2s(int gerr)
- {
- if (gerr == -1) {
-- /* ### need to fix this */
-- return APR_EGENERAL;
-+ if (gdbm_errno == GDBM_NO_ERROR)
-+ return APR_SUCCESS;
-+ return APR_OS_START_USEERR + gdbm_errno;
- }
-
- return APR_SUCCESS;
- }
-
-+static apr_status_t gdat2s(datum d)
-+{
-+ if (d.dptr == NULL) {
-+ if (gdbm_errno == GDBM_NO_ERROR || gdbm_errno == GDBM_ITEM_NOT_FOUND)
-+ return APR_SUCCESS;
-+ return APR_OS_START_USEERR + gdbm_errno;
-+ }
-+
-+ return APR_SUCCESS;
-+}
-+
- static apr_status_t datum_cleanup(void *dptr)
- {
- if (dptr)
-@@ -53,22 +65,15 @@ static apr_status_t datum_cleanup(void *dptr)
-
- static apr_status_t set_error(apr_dbm_t *dbm, apr_status_t dbm_said)
- {
-- apr_status_t rv = APR_SUCCESS;
-
-- /* ### ignore whatever the DBM said (dbm_said); ask it explicitly */
-+ dbm->errcode = dbm_said;
-
-- if ((dbm->errcode = gdbm_errno) == GDBM_NO_ERROR) {
-+ if (dbm_said == APR_SUCCESS)
- dbm->errmsg = NULL;
-- }
-- else {
-- dbm->errmsg = gdbm_strerror(gdbm_errno);
-- rv = APR_EGENERAL; /* ### need something better */
-- }
--
-- /* captured it. clear it now. */
-- gdbm_errno = GDBM_NO_ERROR;
-+ else
-+ dbm->errmsg = gdbm_strerror(dbm_said - APR_OS_START_USEERR);
-
-- return rv;
-+ return dbm_said;
- }
-
- /* --------------------------------------------------------------------------
-@@ -107,7 +112,7 @@ static apr_status_t vt_gdbm_open(apr_dbm_t **pdb, const char *pathname,
- NULL);
-
- if (file == NULL)
-- return APR_EGENERAL; /* ### need a better error */
-+ return APR_OS_START_USEERR + gdbm_errno; /* ### need a better error */
-
- /* we have an open database... return it */
- *pdb = apr_pcalloc(pool, sizeof(**pdb));
-@@ -141,10 +146,12 @@ static apr_status_t vt_gdbm_fetch(apr_dbm_t *dbm, apr_datum_t key,
- if (pvalue->dptr)
- apr_pool_cleanup_register(dbm->pool, pvalue->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pvalue->dsize = 0;
-
- /* store the error info into DBM, and return a status code. Also, note
- that *pvalue should have been cleared on error. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static apr_status_t vt_gdbm_store(apr_dbm_t *dbm, apr_datum_t key,
-@@ -201,9 +208,11 @@ static apr_status_t vt_gdbm_firstkey(apr_dbm_t *dbm, apr_datum_t *pkey)
- if (pkey->dptr)
- apr_pool_cleanup_register(dbm->pool, pkey->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pkey->dsize = 0;
-
- /* store any error info into DBM, and return a status code. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static apr_status_t vt_gdbm_nextkey(apr_dbm_t *dbm, apr_datum_t *pkey)
-@@ -221,9 +230,11 @@ static apr_status_t vt_gdbm_nextkey(apr_dbm_t *dbm, apr_datum_t *pkey)
- if (pkey->dptr)
- apr_pool_cleanup_register(dbm->pool, pkey->dptr, datum_cleanup,
- apr_pool_cleanup_null);
-+ else
-+ pkey->dsize = 0;
-
- /* store any error info into DBM, and return a status code. */
-- return set_error(dbm, APR_SUCCESS);
-+ return set_error(dbm, gdat2s(rd));
- }
-
- static void vt_gdbm_freedatum(apr_dbm_t *dbm, apr_datum_t data)
---
-2.7.4
-
diff --git a/meta/recipes-support/apr/apr-util_1.6.1.bb b/meta/recipes-support/apr/apr-util_1.6.3.bb
index f7d827a1d8..3d9d619c7b 100644
--- a/meta/recipes-support/apr/apr-util_1.6.1.bb
+++ b/meta/recipes-support/apr/apr-util_1.6.3.bb
@@ -13,11 +13,9 @@ SRC_URI = "${APACHE_MIRROR}/apr/${BPN}-${PV}.tar.gz \
file://configfix.patch \
file://configure_fixes.patch \
file://run-ptest \
- file://0001-Fix-error-handling-in-gdbm.patch \
-"
+ "
-SRC_URI[md5sum] = "bd502b9a8670a8012c4d90c31a84955f"
-SRC_URI[sha256sum] = "b65e40713da57d004123b6319828be7f1273fbc6490e145874ee1177e112c459"
+SRC_URI[sha256sum] = "2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983"
EXTRA_OECONF = "--with-apr=${STAGING_BINDIR_CROSS}/apr-1-config \
--without-odbc \
@@ -35,6 +33,7 @@ OE_BINCONFIG_EXTRA_MANGLE = " -e 's:location=source:location=installed:'"
do_configure_append() {
if [ "${CLASSOVERRIDE}" = "class-target" ]; then
cp ${STAGING_DATADIR}/apr/apr_rules.mk ${B}/build/rules.mk
+ sed -i -e 's#^CFLAGS=.*#CFLAGS=${TARGET_CFLAGS}#g' ${B}/build/rules.mk
fi
}
do_configure_prepend_class-native() {
@@ -49,6 +48,7 @@ do_configure_append_class-native() {
do_configure_prepend_class-nativesdk() {
cp ${STAGING_DATADIR}/apr/apr_rules.mk ${S}/build/rules.mk
+ sed -i -e 's#^CFLAGS=.*#CFLAGS=${TARGET_CFLAGS}#g' ${S}/build/rules.mk
}
do_configure_append_class-nativesdk() {
diff --git a/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch b/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
index abff4e9331..a274f3a16e 100644
--- a/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
+++ b/meta/recipes-support/apr/apr/0001-Add-option-to-disable-timed-dependant-tests.patch
@@ -1,14 +1,15 @@
-From 2bbe20b4f69e84e7a18bc79d382486953f479328 Mon Sep 17 00:00:00 2001
+From 225abf37cd0b49960664b59f08e515a4c4ea5ad0 Mon Sep 17 00:00:00 2001
From: Jeremy Puhlman <jpuhlman@mvista.com>
Date: Thu, 26 Mar 2020 18:30:36 +0000
Subject: [PATCH] Add option to disable timed dependant tests
-The disabled tests rely on timing to pass correctly. On a virtualized
+The disabled tests rely on timing to pass correctly. On a virtualized
system under heavy load, these tests randomly fail because they miss
a timer or other timing related issues.
Upstream-Status: Pending
Signed-off-by: Jeremy Puhlman <jpuhlman@mvista.com>
+
---
configure.in | 6 ++++++
include/apr.h.in | 1 +
@@ -16,10 +17,10 @@ Signed-off-by: Jeremy Puhlman <jpuhlman@mvista.com>
3 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/configure.in b/configure.in
-index d9f32d6..f0c5661 100644
+index bfd488b..3663220 100644
--- a/configure.in
+++ b/configure.in
-@@ -2886,6 +2886,12 @@ AC_ARG_ENABLE(timedlocks,
+@@ -3023,6 +3023,12 @@ AC_ARG_ENABLE(timedlocks,
)
AC_SUBST(apr_has_timedlocks)
@@ -45,10 +46,10 @@ index ee99def..c46a5f4 100644
#define APR_PROCATTR_USER_SET_REQUIRES_PASSWORD @apr_procattr_user_set_requires_password@
diff --git a/test/testlock.c b/test/testlock.c
-index a43f477..6233d0b 100644
+index e3437c1..04e01b9 100644
--- a/test/testlock.c
+++ b/test/testlock.c
-@@ -396,13 +396,13 @@ abts_suite *testlock(abts_suite *suite)
+@@ -535,7 +535,7 @@ abts_suite *testlock(abts_suite *suite)
abts_run_test(suite, threads_not_impl, NULL);
#else
abts_run_test(suite, test_thread_mutex, NULL);
@@ -56,6 +57,8 @@ index a43f477..6233d0b 100644
+#if APR_HAS_TIMEDLOCKS && APR_HAVE_TIME_DEPENDANT_TESTS
abts_run_test(suite, test_thread_timedmutex, NULL);
#endif
+ abts_run_test(suite, test_thread_nestedmutex, NULL);
+@@ -543,7 +543,7 @@ abts_suite *testlock(abts_suite *suite)
abts_run_test(suite, test_thread_rwlock, NULL);
abts_run_test(suite, test_cond, NULL);
abts_run_test(suite, test_timeoutcond, NULL);
@@ -63,7 +66,4 @@ index a43f477..6233d0b 100644
+#if APR_HAS_TIMEDLOCKS && APR_HAVE_TIME_DEPENDANT_TESTS
abts_run_test(suite, test_timeoutmutex, NULL);
#endif
- #endif
---
-2.23.0
-
+ #ifdef WIN32
diff --git a/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch b/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch
new file mode 100644
index 0000000000..a78b16284f
--- /dev/null
+++ b/meta/recipes-support/apr/apr/0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch
@@ -0,0 +1,58 @@
+From 316b81c462f065927d7fec56aadd5c8cb94d1cf0 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Fri, 26 Aug 2022 00:28:08 -0700
+Subject: [PATCH] configure: Remove runtime test for mmap that can map
+ /dev/zero
+
+This never works for cross-compile moreover it ends up disabling
+ac_cv_file__dev_zero which then results in compiler errors in shared
+mutexes
+
+Upstream-Status: Inappropriate [Cross-compile specific]
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+
+---
+ configure.in | 30 ------------------------------
+ 1 file changed, 30 deletions(-)
+
+diff --git a/configure.in b/configure.in
+index 3663220..dce9789 100644
+--- a/configure.in
++++ b/configure.in
+@@ -1303,36 +1303,6 @@ AC_CHECK_FUNCS([mmap munmap shm_open shm_unlink shmget shmat shmdt shmctl \
+ APR_CHECK_DEFINE(MAP_ANON, sys/mman.h)
+ AC_CHECK_FILE(/dev/zero)
+
+-# Not all systems can mmap /dev/zero (such as HP-UX). Check for that.
+-if test "$ac_cv_func_mmap" = "yes" &&
+- test "$ac_cv_file__dev_zero" = "yes"; then
+- AC_CACHE_CHECK([for mmap that can map /dev/zero],
+- [ac_cv_mmap__dev_zero],
+- [AC_TRY_RUN([#include <sys/types.h>
+-#include <sys/stat.h>
+-#include <fcntl.h>
+-#ifdef HAVE_SYS_MMAN_H
+-#include <sys/mman.h>
+-#endif
+- int main()
+- {
+- int fd;
+- void *m;
+- fd = open("/dev/zero", O_RDWR);
+- if (fd < 0) {
+- return 1;
+- }
+- m = mmap(0, sizeof(void*), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+- if (m == (void *)-1) { /* aka MAP_FAILED */
+- return 2;
+- }
+- if (munmap(m, sizeof(void*)) < 0) {
+- return 3;
+- }
+- return 0;
+- }], [], [ac_cv_file__dev_zero=no], [ac_cv_file__dev_zero=no])])
+-fi
+-
+ # Now we determine which one is our anonymous shmem preference.
+ haveshmgetanon="0"
+ havemmapzero="0"
diff --git a/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch b/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
index 72e706f966..d63423f3a1 100644
--- a/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
+++ b/meta/recipes-support/apr/apr/0002-apr-Remove-workdir-path-references-from-installed-ap.patch
@@ -1,8 +1,7 @@
-From 5925b20da8bbc34d9bf5a5dca123ef38864d43c6 Mon Sep 17 00:00:00 2001
+From 689a8db96a6d1e1cae9cbfb35d05ac82140a6555 Mon Sep 17 00:00:00 2001
From: Hongxu Jia <hongxu.jia@windriver.com>
Date: Tue, 30 Jan 2018 09:39:06 +0800
-Subject: [PATCH 2/7] apr: Remove workdir path references from installed apr
- files
+Subject: [PATCH] apr: Remove workdir path references from installed apr files
Upstream-Status: Inappropriate [configuration]
@@ -14,20 +13,23 @@ packages at target run time, the workdir path caused confusion.
Rebase to 1.6.3
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+
---
- apr-config.in | 26 ++------------------------
- 1 file changed, 2 insertions(+), 24 deletions(-)
+ apr-config.in | 32 ++------------------------------
+ 1 file changed, 2 insertions(+), 30 deletions(-)
diff --git a/apr-config.in b/apr-config.in
-index 84b4073..bbbf651 100644
+index bed47ca..47874e5 100644
--- a/apr-config.in
+++ b/apr-config.in
-@@ -152,14 +152,7 @@ while test $# -gt 0; do
+@@ -164,16 +164,7 @@ while test $# -gt 0; do
flags="$flags $LDFLAGS"
;;
--includes)
- if test "$location" = "installed"; then
flags="$flags -I$includedir $EXTRA_INCLUDES"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -I$APR_TARGET_DIR/$includedir $EXTRA_INCLUDES"
- elif test "$location" = "source"; then
- flags="$flags -I$APR_SOURCE_DIR/include $EXTRA_INCLUDES"
- else
@@ -37,13 +39,15 @@ index 84b4073..bbbf651 100644
;;
--srcdir)
echo $APR_SOURCE_DIR
-@@ -181,29 +174,14 @@ while test $# -gt 0; do
+@@ -197,33 +188,14 @@ while test $# -gt 0; do
exit 0
;;
--link-ld)
- if test "$location" = "installed"; then
- ### avoid using -L if libdir is a "standard" location like /usr/lib
- flags="$flags -L$libdir -l${APR_LIBNAME}"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -L$APR_TARGET_DIR/$libdir -l${APR_LIBNAME}"
- else
- ### this surely can't work since the library is in .libs?
- flags="$flags -L$APR_BUILD_DIR -l${APR_LIBNAME}"
@@ -62,6 +66,8 @@ index 84b4073..bbbf651 100644
- # Since the user is specifying they are linking with libtool, we
- # *know* that -R will be recognized by libtool.
- flags="$flags -L$libdir -R$libdir -l${APR_LIBNAME}"
+- elif test "$location" = "crosscompile"; then
+- flags="$flags -L${APR_TARGET_DIR}/$libdir -l${APR_LIBNAME}"
- else
- flags="$flags $LA_FILE"
- fi
@@ -69,6 +75,3 @@ index 84b4073..bbbf651 100644
;;
--shlib-path-var)
echo "$SHLIBPATH_VAR"
---
-1.8.3.1
-
diff --git a/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch b/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch
deleted file mode 100644
index 4dd53bd8eb..0000000000
--- a/meta/recipes-support/apr/apr/0003-Makefile.in-configure.in-support-cross-compiling.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From d5028c10f156c224475b340cfb1ba025d6797243 Mon Sep 17 00:00:00 2001
-From: Hongxu Jia <hongxu.jia@windriver.com>
-Date: Fri, 2 Feb 2018 15:51:42 +0800
-Subject: [PATCH 3/7] Makefile.in/configure.in: support cross compiling
-
-While cross compiling, the tools/gen_test_char could not
-be executed at build time, use AX_PROG_CC_FOR_BUILD to
-build native tools/gen_test_char
-
-Upstream-Status: Submitted [https://github.com/apache/apr/pull/8]
-
-Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
----
- Makefile.in | 10 +++-------
- configure.in | 3 +++
- 2 files changed, 6 insertions(+), 7 deletions(-)
-
-diff --git a/Makefile.in b/Makefile.in
-index 5fb760e..8675f90 100644
---- a/Makefile.in
-+++ b/Makefile.in
-@@ -46,7 +46,7 @@ LT_VERSION = @LT_VERSION@
-
- CLEAN_TARGETS = apr-config.out apr.exp exports.c export_vars.c .make.dirs \
- build/apr_rules.out tools/gen_test_char@EXEEXT@ \
-- tools/gen_test_char.o tools/gen_test_char.lo \
-+ tools/gen_test_char.o \
- include/private/apr_escape_test_char.h
- DISTCLEAN_TARGETS = config.cache config.log config.status \
- include/apr.h include/arch/unix/apr_private.h \
-@@ -131,13 +131,9 @@ check: $(TARGET_LIB)
- etags:
- etags `find . -name '*.[ch]'`
-
--OBJECTS_gen_test_char = tools/gen_test_char.lo $(LOCAL_LIBS)
--tools/gen_test_char.lo: tools/gen_test_char.c
-+tools/gen_test_char@EXEEXT@: tools/gen_test_char.c
- $(APR_MKDIR) tools
-- $(LT_COMPILE)
--
--tools/gen_test_char@EXEEXT@: $(OBJECTS_gen_test_char)
-- $(LINK_PROG) $(OBJECTS_gen_test_char) $(ALL_LIBS)
-+ $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $< -o $@
-
- include/private/apr_escape_test_char.h: tools/gen_test_char@EXEEXT@
- $(APR_MKDIR) include/private
-diff --git a/configure.in b/configure.in
-index 719f331..361120f 100644
---- a/configure.in
-+++ b/configure.in
-@@ -183,6 +183,9 @@ dnl can only be used once within a configure script, so this prevents a
- dnl preload section from invoking the macro to get compiler info.
- AC_PROG_CC
-
-+dnl Check build CC for gen_test_char compiling which is executed at build time.
-+AX_PROG_CC_FOR_BUILD
-+
- dnl AC_PROG_SED is only avaliable in recent autoconf versions.
- dnl Use AC_CHECK_PROG instead if AC_PROG_SED is not present.
- ifdef([AC_PROG_SED],
---
-1.8.3.1
-
diff --git a/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch b/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch
deleted file mode 100644
index d1a2ebe881..0000000000
--- a/meta/recipes-support/apr/apr/0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From 49661ea3858cf8494926cccf57d3e8c6dcb47117 Mon Sep 17 00:00:00 2001
-From: Dengke Du <dengke.du@windriver.com>
-Date: Wed, 14 Dec 2016 18:13:08 +0800
-Subject: [PATCH] apr: fix off_t size doesn't match in glibc when cross
- compiling
-
-In configure.in, it contains the following:
-
- APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], off_t, 8)
-
-the macro "APR_CHECK_SIZEOF_EXTENDED" was defined in build/apr_common.m4,
-it use the "AC_TRY_RUN" macro, this macro let the off_t to 8, when cross
-compiling enable.
-
-So it was hardcoded for cross compiling, we should detect it dynamic based on
-the sysroot's glibc. We change it to the following:
-
- AC_CHECK_SIZEOF(off_t)
-
-The same for the following hardcoded types for cross compiling:
-
- pid_t 8
- ssize_t 8
- size_t 8
- off_t 8
-
-Change the above correspondingly.
-
-Signed-off-by: Dengke Du <dengke.du@windriver.com>
-
-Upstream-Status: Pending
-
----
- configure.in | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/configure.in b/configure.in
-index 27b8539..fb408d1 100644
---- a/configure.in
-+++ b/configure.in
-@@ -1801,7 +1801,7 @@ else
- socklen_t_value="int"
- fi
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], pid_t, 8)
-+AC_CHECK_SIZEOF(pid_t)
-
- if test "$ac_cv_sizeof_pid_t" = "$ac_cv_sizeof_short"; then
- pid_t_fmt='#define APR_PID_T_FMT "hd"'
-@@ -1873,7 +1873,7 @@ APR_CHECK_TYPES_FMT_COMPATIBLE(size_t, unsigned long, lu, [size_t_fmt="lu"], [
- APR_CHECK_TYPES_FMT_COMPATIBLE(size_t, unsigned int, u, [size_t_fmt="u"])
- ])
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], ssize_t, 8)
-+AC_CHECK_SIZEOF(ssize_t)
-
- dnl the else cases below should no longer occur;
- AC_MSG_CHECKING([which format to use for apr_ssize_t])
-@@ -1891,7 +1891,7 @@ fi
-
- ssize_t_fmt="#define APR_SSIZE_T_FMT \"$ssize_t_fmt\""
-
--APR_CHECK_SIZEOF_EXTENDED([#include <stddef.h>], size_t, 8)
-+AC_CHECK_SIZEOF(size_t)
-
- # else cases below should no longer occur;
- AC_MSG_CHECKING([which format to use for apr_size_t])
-@@ -1909,7 +1909,7 @@ fi
-
- size_t_fmt="#define APR_SIZE_T_FMT \"$size_t_fmt\""
-
--APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], off_t, 8)
-+AC_CHECK_SIZEOF(off_t)
-
- if test "${ac_cv_sizeof_off_t}${apr_cv_use_lfs64}" = "4yes"; then
- # Enable LFS
diff --git a/meta/recipes-support/apr/apr/CVE-2021-35940.patch b/meta/recipes-support/apr/apr/CVE-2021-35940.patch
deleted file mode 100644
index 00befdacee..0000000000
--- a/meta/recipes-support/apr/apr/CVE-2021-35940.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-
-SECURITY: CVE-2021-35940 (cve.mitre.org)
-
-Restore fix for CVE-2017-12613 which was missing in 1.7.x branch, though
-was addressed in 1.6.x in 1.6.3 and later via r1807976.
-
-The fix was merged back to 1.7.x in r1891198.
-
-Since this was a regression in 1.7.0, a new CVE name has been assigned
-to track this, CVE-2021-35940.
-
-Thanks to Iveta Cesalova <icesalov redhat.com> for reporting this issue.
-
-https://svn.apache.org/viewvc?view=revision&revision=1891198
-
-Upstream-Status: Backport
-CVE: CVE-2021-35940
-Signed-off-by: Armin Kuster <akuster@mvista.com>
-
-
-Index: time/unix/time.c
-===================================================================
---- a/time/unix/time.c (revision 1891197)
-+++ b/time/unix/time.c (revision 1891198)
-@@ -142,6 +142,9 @@
- static const int dayoffset[12] =
- {306, 337, 0, 31, 61, 92, 122, 153, 184, 214, 245, 275};
-
-+ if (xt->tm_mon < 0 || xt->tm_mon >= 12)
-+ return APR_EBADDATE;
-+
- /* shift new year to 1st March in order to make leap year calc easy */
-
- if (xt->tm_mon < 2)
-Index: time/win32/time.c
-===================================================================
---- a/time/win32/time.c (revision 1891197)
-+++ b/time/win32/time.c (revision 1891198)
-@@ -54,6 +54,9 @@
- static const int dayoffset[12] =
- {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
-
-+ if (tm->wMonth < 1 || tm->wMonth > 12)
-+ return APR_EBADDATE;
-+
- /* Note; the caller is responsible for filling in detailed tm_usec,
- * tm_gmtoff and tm_isdst data when applicable.
- */
-@@ -228,6 +231,9 @@
- static const int dayoffset[12] =
- {306, 337, 0, 31, 61, 92, 122, 153, 184, 214, 245, 275};
-
-+ if (xt->tm_mon < 0 || xt->tm_mon >= 12)
-+ return APR_EBADDATE;
-+
- /* shift new year to 1st March in order to make leap year calc easy */
-
- if (xt->tm_mon < 2)
diff --git a/meta/recipes-support/apr/apr/libtoolize_check.patch b/meta/recipes-support/apr/apr/libtoolize_check.patch
index 740792e6b0..80ce43caa4 100644
--- a/meta/recipes-support/apr/apr/libtoolize_check.patch
+++ b/meta/recipes-support/apr/apr/libtoolize_check.patch
@@ -1,6 +1,7 @@
+From 17835709bc55657b7af1f7c99b3f572b819cf97e Mon Sep 17 00:00:00 2001
From: Helmut Grohne <helmut@subdivi.de>
-Subject: check for libtoolize rather than libtool
-Last-Update: 2014-09-19
+Date: Tue, 7 Feb 2023 07:04:00 +0000
+Subject: [PATCH] check for libtoolize rather than libtool
libtool is now in package libtool-bin, but apr only needs libtoolize.
@@ -8,14 +9,22 @@ Upstream-Status: Pending [ from debian: https://sources.debian.org/data/main/a/a
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
---- apr.orig/build/buildcheck.sh
-+++ apr/build/buildcheck.sh
-@@ -39,11 +39,11 @@ fi
+---
+ build/buildcheck.sh | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/build/buildcheck.sh b/build/buildcheck.sh
+index 44921b5..08bc8a8 100755
+--- a/build/buildcheck.sh
++++ b/build/buildcheck.sh
+@@ -39,13 +39,11 @@ fi
# ltmain.sh (GNU libtool 1.1361 2004/01/02 23:10:52) 1.5a
# output is multiline from 1.5 onwards
-# Require libtool 1.4 or newer
--libtool=`build/PrintPath glibtool1 glibtool libtool libtool15 libtool14`
+-if test -z "$libtool"; then
+- libtool=`build/PrintPath glibtool1 glibtool libtool libtool15 libtool14`
+-fi
-lt_pversion=`$libtool --version 2>/dev/null|sed -e 's/([^)]*)//g;s/^[^0-9]*//;s/[- ].*//g;q'`
+# Require libtoolize 1.4 or newer
+libtoolize=`build/PrintPath glibtoolize1 glibtoolize libtoolize libtoolize15 libtoolize14`
diff --git a/meta/recipes-support/apr/apr_1.7.0.bb b/meta/recipes-support/apr/apr_1.7.2.bb
index 92cc61a864..807dce21da 100644
--- a/meta/recipes-support/apr/apr_1.7.0.bb
+++ b/meta/recipes-support/apr/apr_1.7.2.bb
@@ -16,18 +16,15 @@ BBCLASSEXTEND = "native nativesdk"
SRC_URI = "${APACHE_MIRROR}/apr/${BPN}-${PV}.tar.bz2 \
file://run-ptest \
file://0002-apr-Remove-workdir-path-references-from-installed-ap.patch \
- file://0003-Makefile.in-configure.in-support-cross-compiling.patch \
file://0004-Fix-packet-discards-HTTP-redirect.patch \
file://0005-configure.in-fix-LTFLAGS-to-make-it-work-with-ccache.patch \
- file://0006-apr-fix-off_t-size-doesn-t-match-in-glibc-when-cross.patch \
file://0007-explicitly-link-libapr-against-phtread-to-make-gold-.patch \
file://libtoolize_check.patch \
file://0001-Add-option-to-disable-timed-dependant-tests.patch \
- file://CVE-2021-35940.patch \
+ file://0001-configure-Remove-runtime-test-for-mmap-that-can-map-.patch \
"
-SRC_URI[md5sum] = "7a14a83d664e87599ea25ff4432e48a7"
-SRC_URI[sha256sum] = "e2e148f0b2e99b8e5c6caa09f6d4fb4dd3e83f744aa72a952f94f5a14436f7ea"
+SRC_URI[sha256sum] = "75e77cc86776c030c0a5c408dfbd0bf2a0b75eed5351e52d5439fa1e5509a43e"
inherit autotools-brokensep lib_package binconfig multilib_header ptest multilib_script
@@ -35,17 +32,30 @@ OE_BINCONFIG_EXTRA_MANGLE = " -e 's:location=source:location=installed:'"
# Added to fix some issues with cmake. Refer to https://github.com/bmwcarit/meta-ros/issues/68#issuecomment-19896928
CACHED_CONFIGUREVARS += "apr_cv_mutex_recursive=yes"
-
+# Enable largefile
+CACHED_CONFIGUREVARS += "apr_cv_use_lfs64=yes"
+# Additional AC_TRY_RUN tests which will need to be cached for cross compile
+CACHED_CONFIGUREVARS += "apr_cv_epoll=yes epoll_create1=yes apr_cv_sock_cloexec=yes \
+ ac_cv_struct_rlimit=yes \
+ ac_cv_func_sem_open=yes \
+ apr_cv_process_shared_works=yes \
+ apr_cv_mutex_robust_shared=yes \
+ "
# Also suppress trying to use sctp.
#
CACHED_CONFIGUREVARS += "ac_cv_header_netinet_sctp_h=no ac_cv_header_netinet_sctp_uio_h=no"
-CACHED_CONFIGUREVARS += "ac_cv_sizeof_struct_iovec=yes"
+# ac_cv_sizeof_struct_iovec is deduced using runtime check which will fail during cross-compile
+CACHED_CONFIGUREVARS += "${@['ac_cv_sizeof_struct_iovec=16','ac_cv_sizeof_struct_iovec=8'][d.getVar('SITEINFO_BITS') != '32']}"
+
CACHED_CONFIGUREVARS += "ac_cv_file__dev_zero=yes"
+CACHED_CONFIGUREVARS:append:libc-musl = " ac_cv_strerror_r_rc_int=yes"
PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)}"
+PACKAGECONFIG:append:libc-musl = " xsi-strerror"
PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6,"
PACKAGECONFIG[timed-tests] = "--enable-timed-tests,--disable-timed-tests,"
+PACKAGECONFIG[xsi-strerror] = "ac_cv_strerror_r_rc_int=yes,ac_cv_strerror_r_rc_int=no,"
do_configure_prepend() {
# Avoid absolute paths for grep since it causes failures
diff --git a/meta/recipes-support/bmap-tools/bmap-tools_3.5.bb b/meta/recipes-support/bmap-tools/bmap-tools_3.5.bb
index 97b88ec033..6a93cacc18 100644
--- a/meta/recipes-support/bmap-tools/bmap-tools_3.5.bb
+++ b/meta/recipes-support/bmap-tools/bmap-tools_3.5.bb
@@ -9,7 +9,7 @@ SECTION = "console/utils"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263"
-SRC_URI = "git://github.com/intel/${BPN};branch=master;protocol=https"
+SRC_URI = "git://github.com/intel/${BPN};branch=main;protocol=https"
SRCREV = "db7087b883bf52cbff063ad17a41cc1cbb85104d"
S = "${WORKDIR}/git"
diff --git a/meta/recipes-support/curl/curl/CVE-2022-22576.patch b/meta/recipes-support/curl/curl/CVE-2022-22576.patch
new file mode 100644
index 0000000000..13479e7f0e
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-22576.patch
@@ -0,0 +1,148 @@
+From 852aa5ad351ea53e5f01d2f44b5b4370c2bf5425 Mon Sep 17 00:00:00 2001
+From: Patrick Monnerat <patrick@monnerat.net>
+Date: Mon, 25 Apr 2022 11:44:05 +0200
+Subject: [PATCH] url: check sasl additional parameters for connection reuse.
+
+Also move static function safecmp() as non-static Curl_safecmp() since
+its purpose is needed at several places.
+
+Bug: https://curl.se/docs/CVE-2022-22576.html
+
+CVE-2022-22576
+
+Closes #8746
+---
+ lib/strcase.c | 10 ++++++++++
+ lib/strcase.h | 2 ++
+ lib/url.c | 13 ++++++++++++-
+ lib/urldata.h | 1 +
+ lib/vtls/vtls.c | 21 ++++++---------------
+ 5 files changed, 31 insertions(+), 16 deletions(-)
+
+CVE: CVE-2022-22576
+Upstream-Status: Backport [https://github.com/curl/curl/commit/852aa5ad351ea53e5f01d2f44b5b4370c2bf5425.patch]
+Comment: Refreshed patch
+Signed-off-by: Sana.Kazi <Sana.Kazi@kpit.com>
+
+diff --git a/lib/strcase.c b/lib/strcase.c
+index dd46ca1ba0e5..692a3f14aee7 100644
+--- a/lib/strcase.c
++++ b/lib/strcase.c
+@@ -251,6 +251,16 @@
+ } while(*src++ && --n);
+ }
+
++/* Compare case-sensitive NUL-terminated strings, taking care of possible
++ * null pointers. Return true if arguments match.
++ */
++bool Curl_safecmp(char *a, char *b)
++{
++ if(a && b)
++ return !strcmp(a, b);
++ return !a && !b;
++}
++
+ /* --- public functions --- */
+
+ int curl_strequal(const char *first, const char *second)
+diff --git a/lib/strcase.h b/lib/strcase.h
+index b234d3815220..2635f5117e99 100644
+--- a/lib/strcase.h
++++ b/lib/strcase.h
+@@ -48,4 +48,6 @@
+ void Curl_strntoupper(char *dest, const char *src, size_t n);
+ void Curl_strntolower(char *dest, const char *src, size_t n);
+
++bool Curl_safecmp(char *a, char *b);
++
+ #endif /* HEADER_CURL_STRCASE_H */
+diff --git a/lib/url.c b/lib/url.c
+index 9a988b4d58d8..e1647b133854 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -730,6 +730,7 @@
+ Curl_safefree(conn->allocptr.host);
+ Curl_safefree(conn->allocptr.cookiehost);
+ Curl_safefree(conn->allocptr.rtsp_transport);
++ Curl_safefree(conn->oauth_bearer);
+ Curl_safefree(conn->trailer);
+ Curl_safefree(conn->host.rawalloc); /* host name buffer */
+ Curl_safefree(conn->conn_to_host.rawalloc); /* host name buffer */
+@@ -1251,7 +1252,9 @@
+ /* This protocol requires credentials per connection,
+ so verify that we're using the same name and password as well */
+ if(strcmp(needle->user, check->user) ||
+- strcmp(needle->passwd, check->passwd)) {
++ strcmp(needle->passwd, check->passwd) ||
++ !Curl_safecmp(needle->sasl_authzid, check->sasl_authzid) ||
++ !Curl_safecmp(needle->oauth_bearer, check->oauth_bearer)) {
+ /* one of them was different */
+ continue;
+ }
+@@ -3392,6 +3395,14 @@
+ result = CURLE_OUT_OF_MEMORY;
+ goto out;
+ }
++ }
++
++ if(data->set.str[STRING_BEARER]) {
++ conn->oauth_bearer = strdup(data->set.str[STRING_BEARER]);
++ if(!conn->oauth_bearer) {
++ result = CURLE_OUT_OF_MEMORY;
++ goto out;
++ }
+ }
+
+ #ifdef USE_UNIX_SOCKETS
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 07eb19b87034..1d89b8d7fa68 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -949,6 +949,8 @@
+
+ char *sasl_authzid; /* authorisation identity string, allocated */
+
++ char *oauth_bearer; /* OAUTH2 bearer, allocated */
++
+ int httpversion; /* the HTTP version*10 reported by the server */
+ int rtspversion; /* the RTSP version*10 reported by the server */
+
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index 03b85ba065e5..a40ac06f684f 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -82,15 +82,6 @@
+ else \
+ dest->var = NULL;
+
+-static bool safecmp(char *a, char *b)
+-{
+- if(a && b)
+- return !strcmp(a, b);
+- else if(!a && !b)
+- return TRUE; /* match */
+- return FALSE; /* no match */
+-}
+-
+
+ bool
+ Curl_ssl_config_matches(struct ssl_primary_config* data,
+@@ -101,12 +101,12 @@
+ (data->verifypeer == needle->verifypeer) &&
+ (data->verifyhost == needle->verifyhost) &&
+ (data->verifystatus == needle->verifystatus) &&
+- safecmp(data->CApath, needle->CApath) &&
+- safecmp(data->CAfile, needle->CAfile) &&
+- safecmp(data->issuercert, needle->issuercert) &&
+- safecmp(data->clientcert, needle->clientcert) &&
+- safecmp(data->random_file, needle->random_file) &&
+- safecmp(data->egdsocket, needle->egdsocket) &&
++ Curl_safecmp(data->CApath, needle->CApath) &&
++ Curl_safecmp(data->CAfile, needle->CAfile) &&
++ Curl_safecmp(data->issuercert, needle->issuercert) &&
++ Curl_safecmp(data->clientcert, needle->clientcert) &&
++ Curl_safecmp(data->random_file, needle->random_file) &&
++ Curl_safecmp(data->egdsocket, needle->egdsocket) &&
+ Curl_safe_strcasecompare(data->cipher_list, needle->cipher_list) &&
+ Curl_safe_strcasecompare(data->cipher_list13, needle->cipher_list13) &&
+ Curl_safe_strcasecompare(data->pinned_key, needle->pinned_key))
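The heart of this backport is the NULL-tolerant string comparison promoted to Curl_safecmp(), which the connection-reuse check now also applies to the SASL authzid and the OAuth bearer token. A minimal, standalone sketch of that comparison (illustrative only, not part of the patch above):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Two NULL pointers match; NULL versus non-NULL never matches. */
static bool safecmp_demo(const char *a, const char *b)
{
  if(a && b)
    return !strcmp(a, b);
  return !a && !b;
}

int main(void)
{
  printf("%d %d %d\n",
         safecmp_demo(NULL, NULL),   /* 1: both unset, reuse still possible */
         safecmp_demo("id", NULL),   /* 0: only one side set, no reuse */
         safecmp_demo("id", "id"));  /* 1: identical values */
  return 0;
}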
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27774-1.patch b/meta/recipes-support/curl/curl/CVE-2022-27774-1.patch
new file mode 100644
index 0000000000..063c11712a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27774-1.patch
@@ -0,0 +1,45 @@
+From 2a797e099731facf62a2c675396334bc2ad3bc7c Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 16:24:33 +0200
+Subject: [PATCH] connect: store "conn_remote_port" in the info struct
+
+To make it available after the connection ended.
+
+Prerequisite for the patches that address CVE-2022-27774.
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/08b8ef4e726ba10f45081ecda5b3cea788d3c839]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/connect.c | 1 +
+ lib/urldata.h | 6 +++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/lib/connect.c b/lib/connect.c
+index b3d4057..a977d67 100644
+--- a/lib/connect.c
++++ b/lib/connect.c
+@@ -624,6 +624,7 @@ void Curl_persistconninfo(struct connectdata *conn)
+ conn->data->info.conn_scheme = conn->handler->scheme;
+ conn->data->info.conn_protocol = conn->handler->protocol;
+ conn->data->info.conn_primary_port = conn->primary_port;
++ conn->data->info.conn_remote_port = conn->remote_port;
+ conn->data->info.conn_local_port = conn->local_port;
+ }
+
+diff --git a/lib/urldata.h b/lib/urldata.h
+index fafb7a3..ab1b267 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1148,7 +1148,11 @@ struct PureInfo {
+ reused, in the connection cache. */
+
+ char conn_primary_ip[MAX_IPADR_LEN];
+- long conn_primary_port;
++ long conn_primary_port; /* this is the destination port to the connection,
++ which might have been a proxy */
++ long conn_remote_port; /* this is the "remote port", which is the port
++ number of the used URL, independent of proxy or
++ not */
+ char conn_local_ip[MAX_IPADR_LEN];
+ long conn_local_port;
+ const char *conn_scheme;
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27774-2.patch b/meta/recipes-support/curl/curl/CVE-2022-27774-2.patch
new file mode 100644
index 0000000000..c64d614194
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27774-2.patch
@@ -0,0 +1,80 @@
+From 5c2f3b3a5f115625134669d90d591de9c5aafc8e Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 16:24:33 +0200
+Subject: [PATCH] transfer: redirects to other protocols or ports clear auth
+
+... unless explicitly permitted.
+
+Bug: https://curl.se/docs/CVE-2022-27774.html
+Reported-by: Harry Sintonen
+Closes #8748
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/620ea21410030a9977396b4661806bc187231b79]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/transfer.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 48 insertions(+), 1 deletion(-)
+
+diff --git a/lib/transfer.c b/lib/transfer.c
+index 744e1c0..ac69d27 100644
+--- a/lib/transfer.c
++++ b/lib/transfer.c
+@@ -1627,10 +1627,57 @@ CURLcode Curl_follow(struct Curl_easy *data,
+ return CURLE_OUT_OF_MEMORY;
+ }
+ else {
+-
+ uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
+ if(uc)
+ return Curl_uc_to_curlcode(uc);
++
++ /* Clear auth if this redirects to a different port number or protocol,
++ unless permitted */
++ if(!data->set.allow_auth_to_other_hosts && (type != FOLLOW_FAKE)) {
++ char *portnum;
++ int port;
++ bool clear = FALSE;
++
++ if(data->set.use_port && data->state.allow_port)
++ /* a custom port is used */
++ port = (int)data->set.use_port;
++ else {
++ uc = curl_url_get(data->state.uh, CURLUPART_PORT, &portnum,
++ CURLU_DEFAULT_PORT);
++ if(uc) {
++ free(newurl);
++ return Curl_uc_to_curlcode(uc);
++ }
++ port = atoi(portnum);
++ free(portnum);
++ }
++ if(port != data->info.conn_remote_port) {
++ infof(data, "Clear auth, redirects to port from %u to %u",
++ data->info.conn_remote_port, port);
++ clear = TRUE;
++ }
++ else {
++ char *scheme;
++ const struct Curl_handler *p;
++ uc = curl_url_get(data->state.uh, CURLUPART_SCHEME, &scheme, 0);
++ if(uc) {
++ free(newurl);
++ return Curl_uc_to_curlcode(uc);
++ }
++
++ p = Curl_builtin_scheme(scheme);
++ if(p && (p->protocol != data->info.conn_protocol)) {
++ infof(data, "Clear auth, redirects scheme from %s to %s",
++ data->info.conn_scheme, scheme);
++ clear = TRUE;
++ }
++ free(scheme);
++ }
++ if(clear) {
++ Curl_safefree(data->set.str[STRING_USERNAME]);
++ Curl_safefree(data->set.str[STRING_PASSWORD]);
++ }
++ }
+ }
+
+ if(type == FOLLOW_FAKE) {
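For context, the policy the hunk above adds can be reduced to a small predicate: on a redirect, credentials are cleared unless the target keeps the original port and protocol, or the user explicitly allowed auth to other hosts. A hedged, standalone sketch (the names here are assumptions for illustration, not curl's API):

#include <stdbool.h>
#include <string.h>

struct origin {
  int port;             /* port of the first (not followed) request */
  const char *scheme;   /* scheme of the first request, e.g. "https" */
};

/* Return true when auth data should be dropped before following. */
bool must_clear_auth(const struct origin *first, int new_port,
                     const char *new_scheme, bool allow_other_hosts)
{
  if(allow_other_hosts)
    return false;                                /* explicit opt-in */
  if(new_port != first->port)
    return true;                                 /* port changed */
  return strcmp(new_scheme, first->scheme) != 0; /* protocol changed */
}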
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27774-3.patch b/meta/recipes-support/curl/curl/CVE-2022-27774-3.patch
new file mode 100644
index 0000000000..a585f6a8fa
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27774-3.patch
@@ -0,0 +1,83 @@
+From 5dccf21ad49eed925e8f76b0cb844877239ce23d Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 17:59:15 +0200
+Subject: [PATCH] openssl: don't leak the SRP credentials in redirects either
+
+Follow-up to 620ea21410030
+
+Reported-by: Harry Sintonen
+Closes #8751
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/139a54ed0a172adaaf1a78d6f4fff50b2c3f9e08]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/http.c | 10 +++++-----
+ lib/http.h | 6 ++++++
+ lib/vtls/openssl.c | 3 ++-
+ 3 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/lib/http.c b/lib/http.c
+index 8b16c09..5291c07 100644
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -732,10 +732,10 @@ output_auth_headers(struct connectdata *conn,
+ }
+
+ /*
+- * allow_auth_to_host() tells if autentication, cookies or other "sensitive
+- * data" can (still) be sent to this host.
++ * Curl_allow_auth_to_host() tells if authentication, cookies or other
++ * "sensitive data" can (still) be sent to this host.
+ */
+-static bool allow_auth_to_host(struct Curl_easy *data)
++bool Curl_allow_auth_to_host(struct Curl_easy *data)
+ {
+ struct connectdata *conn = data->conn;
+ return (!data->state.this_is_a_follow ||
+@@ -816,7 +816,7 @@ Curl_http_output_auth(struct connectdata *conn,
+
+ /* To prevent the user+password to get sent to other than the original host
+ due to a location-follow */
+- if(allow_auth_to_host(data)
++ if(Curl_allow_auth_to_host(data)
+ || conn->bits.netrc
+ )
+ result = output_auth_headers(conn, authhost, request, path, FALSE);
+@@ -1891,7 +1891,7 @@ CURLcode Curl_add_custom_headers(struct connectdata *conn,
+ checkprefix("Cookie:", compare)) &&
+ /* be careful of sending this potentially sensitive header to
+ other hosts */
+- !allow_auth_to_host(data))
++ !Curl_allow_auth_to_host(data))
+ ;
+ else {
+ result = Curl_add_bufferf(&req_buffer, "%s\r\n", compare);
+diff --git a/lib/http.h b/lib/http.h
+index 4c1825f..4fbae1d 100644
+--- a/lib/http.h
++++ b/lib/http.h
+@@ -273,4 +273,10 @@ Curl_http_output_auth(struct connectdata *conn,
+ bool proxytunnel); /* TRUE if this is the request setting
+ up the proxy tunnel */
+
++/*
++ * Curl_allow_auth_to_host() tells if authentication, cookies or other
++ * "sensitive data" can (still) be sent to this host.
++ */
++bool Curl_allow_auth_to_host(struct Curl_easy *data);
++
+ #endif /* HEADER_CURL_HTTP_H */
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index 006a8c8..a14cecc 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -2739,7 +2739,8 @@ static CURLcode ossl_connect_step1(struct connectdata *conn, int sockindex)
+ #endif
+
+ #ifdef USE_TLS_SRP
+- if(ssl_authtype == CURL_TLSAUTH_SRP) {
++ if((ssl_authtype == CURL_TLSAUTH_SRP) &&
++ Curl_allow_auth_to_host(data)) {
+ char * const ssl_username = SSL_SET_OPTION(username);
+
+ infof(data, "Using TLS-SRP username: %s\n", ssl_username);
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27774-4.patch b/meta/recipes-support/curl/curl/CVE-2022-27774-4.patch
new file mode 100644
index 0000000000..2258681cab
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27774-4.patch
@@ -0,0 +1,35 @@
+From 7395752e2f7b87dc8c8f2a7137075e2da554aaea Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 26 Apr 2022 07:46:19 +0200
+Subject: [PATCH] gnutls: don't leak the SRP credentials in redirects
+
+Follow-up to 620ea21410030 and 139a54ed0a172a
+
+Reported-by: Harry Sintonen
+Closes #8752
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/093531556203decd92d92bccd431edbe5561781c]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/vtls/gtls.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/lib/vtls/gtls.c b/lib/vtls/gtls.c
+index 8c05102..3d0758d 100644
+--- a/lib/vtls/gtls.c
++++ b/lib/vtls/gtls.c
+@@ -581,11 +581,11 @@ gtls_connect_step1(struct connectdata *conn,
+ }
+
+ #ifdef USE_TLS_SRP
+- if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP) {
++ if((SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP) &&
++ Curl_allow_auth_to_host(data)) {
+ infof(data, "Using TLS-SRP username: %s\n", SSL_SET_OPTION(username));
+
+- rc = gnutls_srp_allocate_client_credentials(
+- &BACKEND->srp_client_cred);
++ rc = gnutls_srp_allocate_client_credentials(&BACKEND->srp_client_cred);
+ if(rc != GNUTLS_E_SUCCESS) {
+ failf(data, "gnutls_srp_allocate_client_cred() failed: %s",
+ gnutls_strerror(rc));
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27775.patch b/meta/recipes-support/curl/curl/CVE-2022-27775.patch
new file mode 100644
index 0000000000..b3fe7b4494
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27775.patch
@@ -0,0 +1,39 @@
+From 058f98dc3fe595f21dc26a5b9b1699e519ba5705 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 11:48:00 +0200
+Subject: [PATCH] conncache: include the zone id in the "bundle" hashkey
+
+Make connections to two separate IPv6 zone ids create separate
+connections.
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2022-27775.html
+Closes #8747
+---
+ lib/conncache.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+CVE: CVE-2022-27775
+Upstream-Status: Backport [https://github.com/curl/curl/commit/058f98dc3fe595f21dc26a5b9b1699e519ba5705.patch]
+Comment: Refreshed patch
+Signed-off-by: Sana.Kazi <Sana.Kazi@kpit.com>
+
+diff --git a/lib/conncache.c b/lib/conncache.c
+index ec669b971dc3..8948b53fa500 100644
+--- a/lib/conncache.c
++++ b/lib/conncache.c
+@@ -156,8 +156,12 @@
+ /* report back which name we used */
+ *hostp = hostname;
+
+- /* put the number first so that the hostname gets cut off if too long */
+- msnprintf(buf, len, "%ld%s", port, hostname);
++ /* put the numbers first so that the hostname gets cut off if too long */
++#ifdef ENABLE_IPV6
++ msnprintf(buf, len, "%u/%ld/%s", conn->scope_id, port, hostname);
++#else
++ msnprintf(buf, len, "%ld/%s", port, hostname);
++#endif
+ }
+
+ /* Returns number of connections currently held in the connection cache.
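To make the effect of the hunk concrete: the connection-cache key now starts with the IPv6 zone id, so two connections to the same address through different interfaces no longer collapse into one bundle. A standalone illustration (simplified; the real code builds the key inside the cache module):

#include <stdio.h>

/* Numbers come first so an overlong hostname is what gets truncated. */
void hashkey_demo(char *buf, size_t len, unsigned int scope_id,
                  long port, const char *hostname)
{
  snprintf(buf, len, "%u/%ld/%s", scope_id, port, hostname);
}

int main(void)
{
  char key1[64], key2[64];
  hashkey_demo(key1, sizeof(key1), 2, 443, "fe80::1");  /* e.g. via eth0 */
  hashkey_demo(key2, sizeof(key2), 3, 443, "fe80::1");  /* e.g. via eth1 */
  printf("%s\n%s\n", key1, key2);  /* different keys, separate bundles */
  return 0;
}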
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27776.patch b/meta/recipes-support/curl/curl/CVE-2022-27776.patch
new file mode 100644
index 0000000000..1a13df2d95
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27776.patch
@@ -0,0 +1,114 @@
+From 6e659993952aa5f90f48864be84a1bbb047fc258 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 25 Apr 2022 13:05:40 +0200
+Subject: [PATCH] http: avoid auth/cookie on redirects same host diff port
+
+CVE-2022-27776
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2022-27776.html
+Closes #8749
+---
+ lib/http.c | 34 ++++++++++++++++++++++------------
+ lib/urldata.h | 16 +++++++++-------
+ 2 files changed, 31 insertions(+), 19 deletions(-)
+
+CVE: CVE-2022-27776
+Upstream-Status: Backport [https://github.com/curl/curl/commit/6e659993952aa5f90f48864be84a1bbb047fc258.patch]
+Comment: Refreshed patch
+Signed-off-by: Sana.Kazi <Sana.Kazi@kpit.com>
+
+diff --git a/lib/http.c b/lib/http.c
+index ce79fc4e31c8..f0476f3b9272 100644
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -731,6 +731,21 @@
+ return CURLE_OK;
+ }
+
++/*
++ * allow_auth_to_host() tells if autentication, cookies or other "sensitive
++ * data" can (still) be sent to this host.
++ */
++static bool allow_auth_to_host(struct Curl_easy *data)
++{
++ struct connectdata *conn = data->conn;
++ return (!data->state.this_is_a_follow ||
++ data->set.allow_auth_to_other_hosts ||
++ (data->state.first_host &&
++ strcasecompare(data->state.first_host, conn->host.name) &&
++ (data->state.first_remote_port == conn->remote_port) &&
++ (data->state.first_remote_protocol == conn->handler->protocol)));
++}
++
+ /**
+ * Curl_http_output_auth() setups the authentication headers for the
+ * host/proxy and the correct authentication
+@@ -799,15 +799,12 @@
+ with it */
+ authproxy->done = TRUE;
+
+- /* To prevent the user+password to get sent to other than the original
+- host due to a location-follow, we do some weirdo checks here */
+- if(!data->state.this_is_a_follow ||
+- conn->bits.netrc ||
+- !data->state.first_host ||
+- data->set.allow_auth_to_other_hosts ||
+- strcasecompare(data->state.first_host, conn->host.name)) {
++ /* To prevent the user+password to get sent to other than the original host
++ due to a location-follow */
++ if(allow_auth_to_host(data)
++ || conn->bits.netrc
++ )
+ result = output_auth_headers(conn, authhost, request, path, FALSE);
+- }
+ else
+ authhost->done = TRUE;
+
+@@ -1879,10 +1891,7 @@
+ checkprefix("Cookie:", compare)) &&
+ /* be careful of sending this potentially sensitive header to
+ other hosts */
+- (data->state.this_is_a_follow &&
+- data->state.first_host &&
+- !data->set.allow_auth_to_other_hosts &&
+- !strcasecompare(data->state.first_host, conn->host.name)))
++ !allow_auth_to_host(data))
+ ;
+ else {
+ result = Curl_add_bufferf(&req_buffer, "%s\r\n", compare);
+@@ -2065,6 +2074,7 @@
+ return CURLE_OUT_OF_MEMORY;
+
+ data->state.first_remote_port = conn->remote_port;
++ data->state.first_remote_protocol = conn->handler->protocol;
+ }
+
+ if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 1d89b8d7fa68..ef2174d9e727 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1342,13 +1342,15 @@
+ char *ulbuf; /* allocated upload buffer or NULL */
+ curl_off_t current_speed; /* the ProgressShow() function sets this,
+ bytes / second */
+- char *first_host; /* host name of the first (not followed) request.
+- if set, this should be the host name that we will
+- sent authorization to, no else. Used to make Location:
+- following not keep sending user+password... This is
+- strdup() data.
+- */
+- int first_remote_port; /* remote port of the first (not followed) request */
++
++ /* host name, port number and protocol of the first (not followed) request.
++ if set, this should be the host name that we will sent authorization to,
++ no else. Used to make Location: following not keep sending user+password.
++ This is strdup()ed data. */
++ char *first_host;
++ int first_remote_port;
++ unsigned int first_remote_protocol;
++
+ struct curl_ssl_session *session; /* array of 'max_ssl_sessions' size */
+ long sessionage; /* number of the most recent session */
+ unsigned int tempcount; /* number of entries in use in tempwrite, 0 - 3 */
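The refactor above boils down to one predicate: after a redirect, authentication and cookies are only re-sent when host, port and protocol all still match the first request. A standalone sketch with assumed field names (not curl's internal types):

#include <stdbool.h>
#include <strings.h>

struct first_request {
  const char *host;        /* host of the original request, or NULL */
  int port;
  unsigned int protocol;
};

bool allow_auth_demo(bool is_follow, bool allow_other_hosts,
                     const struct first_request *first,
                     const char *host, int port, unsigned int protocol)
{
  return !is_follow || allow_other_hosts ||
         (first->host && strcasecmp(first->host, host) == 0 &&
          first->port == port && first->protocol == protocol);
}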
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27781.patch b/meta/recipes-support/curl/curl/CVE-2022-27781.patch
new file mode 100644
index 0000000000..ea1bc22928
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27781.patch
@@ -0,0 +1,46 @@
+From 7a1f183039a6a6c9099a114f5e5c94777413c767 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 9 May 2022 10:07:15 +0200
+Subject: [PATCH] nss: return error if seemingly stuck in a cert loop
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+CVE-2022-27781
+
+Reported-by: Florian Kohnhäuser
+Bug: https://curl.se/docs/CVE-2022-27781.html
+Closes #8822
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/5c7da89d404bf59c8dd82a001119a16d18365917]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/vtls/nss.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/lib/vtls/nss.c b/lib/vtls/nss.c
+index 375c78b..86102f7 100644
+--- a/lib/vtls/nss.c
++++ b/lib/vtls/nss.c
+@@ -950,6 +950,9 @@ static void display_cert_info(struct Curl_easy *data,
+ PR_Free(common_name);
+ }
+
++/* A number of certs that will never occur in a real server handshake */
++#define TOO_MANY_CERTS 300
++
+ static CURLcode display_conn_info(struct connectdata *conn, PRFileDesc *sock)
+ {
+ CURLcode result = CURLE_OK;
+@@ -986,6 +989,11 @@ static CURLcode display_conn_info(struct connectdata *conn, PRFileDesc *sock)
+ cert2 = CERT_FindCertIssuer(cert, now, certUsageSSLCA);
+ while(cert2) {
+ i++;
++ if(i >= TOO_MANY_CERTS) {
++ CERT_DestroyCertificate(cert2);
++ failf(data, "certificate loop");
++ return CURLE_SSL_CERTPROBLEM;
++ }
+ if(cert2->isRoot) {
+ CERT_DestroyCertificate(cert2);
+ break;
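The guard above is a plain counter on the issuer-chain walk: once an implausible number of certificates has been visited, the handshake is treated as a certificate loop and aborted. A standalone sketch of the same idea (the types are assumptions for illustration):

#define TOO_MANY_CERTS 300

struct cert {
  struct cert *issuer;
  int is_root;
};

/* Return 0 if the chain looks like a loop, 1 otherwise. */
int chain_is_sane(const struct cert *leaf)
{
  int i = 0;
  const struct cert *c = leaf->issuer;
  while(c) {
    if(++i >= TOO_MANY_CERTS)
      return 0;            /* seemingly stuck in a cert loop: give up */
    if(c->is_root)
      break;
    c = c->issuer;
  }
  return 1;
}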
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27782-1.patch b/meta/recipes-support/curl/curl/CVE-2022-27782-1.patch
new file mode 100644
index 0000000000..6b6d0e1938
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27782-1.patch
@@ -0,0 +1,363 @@
+From 907a16c832d9ce0ffa7e9b2297548063095a7242 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 9 May 2022 23:13:53 +0200
+Subject: [PATCH] tls: check more TLS details for connection reuse
+
+CVE-2022-27782
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2022-27782.html
+Closes #8825
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/f18af4f874cecab82a9797e8c7541e0990c7a64c]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/setopt.c | 29 +++++++++++++++++------------
+ lib/url.c | 17 ++++++++++-------
+ lib/urldata.h | 13 +++++++------
+ lib/vtls/gtls.c | 30 ++++++++++++++++--------------
+ lib/vtls/mbedtls.c | 2 +-
+ lib/vtls/nss.c | 6 +++---
+ lib/vtls/openssl.c | 10 +++++-----
+ lib/vtls/vtls.c | 1 +
+ 8 files changed, 60 insertions(+), 48 deletions(-)
+
+diff --git a/lib/setopt.c b/lib/setopt.c
+index 4648c87..bebb2e4 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -2130,6 +2130,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+
+ case CURLOPT_SSL_OPTIONS:
+ arg = va_arg(param, long);
++ data->set.ssl.primary.ssl_options = (unsigned char)(arg & 0xff);
+ data->set.ssl.enable_beast =
+ (bool)((arg&CURLSSLOPT_ALLOW_BEAST) ? TRUE : FALSE);
+ data->set.ssl.no_revoke = !!(arg & CURLSSLOPT_NO_REVOKE);
+@@ -2139,6 +2140,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ #ifndef CURL_DISABLE_PROXY
+ case CURLOPT_PROXY_SSL_OPTIONS:
+ arg = va_arg(param, long);
++ data->set.proxy_ssl.primary.ssl_options = (unsigned char)(arg & 0xff);
+ data->set.proxy_ssl.enable_beast =
+ (bool)((arg&CURLSSLOPT_ALLOW_BEAST) ? TRUE : FALSE);
+ data->set.proxy_ssl.no_revoke = !!(arg & CURLSSLOPT_NO_REVOKE);
+@@ -2541,44 +2543,47 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ case CURLOPT_TLSAUTH_USERNAME:
+ result = Curl_setstropt(&data->set.str[STRING_TLSAUTH_USERNAME_ORIG],
+ va_arg(param, char *));
+- if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] && !data->set.ssl.authtype)
+- data->set.ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
++ if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] &&
++ !data->set.ssl.primary.authtype)
++ data->set.ssl.primary.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
+ break;
+ case CURLOPT_PROXY_TLSAUTH_USERNAME:
+ result = Curl_setstropt(&data->set.str[STRING_TLSAUTH_USERNAME_PROXY],
+ va_arg(param, char *));
+ if(data->set.str[STRING_TLSAUTH_USERNAME_PROXY] &&
+- !data->set.proxy_ssl.authtype)
+- data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
++ !data->set.proxy_ssl.primary.authtype)
++ data->set.proxy_ssl.primary.authtype = CURL_TLSAUTH_SRP; /* default to
++ SRP */
+ break;
+ case CURLOPT_TLSAUTH_PASSWORD:
+ result = Curl_setstropt(&data->set.str[STRING_TLSAUTH_PASSWORD_ORIG],
+ va_arg(param, char *));
+- if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] && !data->set.ssl.authtype)
+- data->set.ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
++ if(data->set.str[STRING_TLSAUTH_USERNAME_ORIG] &&
++ !data->set.ssl.primary.authtype)
++ data->set.ssl.primary.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
+ break;
+ case CURLOPT_PROXY_TLSAUTH_PASSWORD:
+ result = Curl_setstropt(&data->set.str[STRING_TLSAUTH_PASSWORD_PROXY],
+ va_arg(param, char *));
+ if(data->set.str[STRING_TLSAUTH_USERNAME_PROXY] &&
+- !data->set.proxy_ssl.authtype)
+- data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP; /* default to SRP */
++ !data->set.proxy_ssl.primary.authtype)
++ data->set.proxy_ssl.primary.authtype = CURL_TLSAUTH_SRP; /* default */
+ break;
+ case CURLOPT_TLSAUTH_TYPE:
+ argptr = va_arg(param, char *);
+ if(!argptr ||
+ strncasecompare(argptr, "SRP", strlen("SRP")))
+- data->set.ssl.authtype = CURL_TLSAUTH_SRP;
++ data->set.ssl.primary.authtype = CURL_TLSAUTH_SRP;
+ else
+- data->set.ssl.authtype = CURL_TLSAUTH_NONE;
++ data->set.ssl.primary.authtype = CURL_TLSAUTH_NONE;
+ break;
+ case CURLOPT_PROXY_TLSAUTH_TYPE:
+ argptr = va_arg(param, char *);
+ if(!argptr ||
+ strncasecompare(argptr, "SRP", strlen("SRP")))
+- data->set.proxy_ssl.authtype = CURL_TLSAUTH_SRP;
++ data->set.proxy_ssl.primary.authtype = CURL_TLSAUTH_SRP;
+ else
+- data->set.proxy_ssl.authtype = CURL_TLSAUTH_NONE;
++ data->set.proxy_ssl.primary.authtype = CURL_TLSAUTH_NONE;
+ break;
+ #endif
+ #ifdef USE_ARES
+diff --git a/lib/url.c b/lib/url.c
+index efa3dc7..6518be9 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -482,7 +482,7 @@ CURLcode Curl_init_userdefined(struct Curl_easy *data)
+ set->ssl.primary.verifypeer = TRUE;
+ set->ssl.primary.verifyhost = TRUE;
+ #ifdef USE_TLS_SRP
+- set->ssl.authtype = CURL_TLSAUTH_NONE;
++ set->ssl.primary.authtype = CURL_TLSAUTH_NONE;
+ #endif
+ set->ssh_auth_types = CURLSSH_AUTH_DEFAULT; /* defaults to any auth
+ type */
+@@ -3594,8 +3594,9 @@ static CURLcode create_conn(struct Curl_easy *data,
+ data->set.proxy_ssl.primary.pinned_key =
+ data->set.str[STRING_SSL_PINNEDPUBLICKEY_PROXY];
+
+- data->set.ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG];
+- data->set.proxy_ssl.CRLfile = data->set.str[STRING_SSL_CRLFILE_PROXY];
++ data->set.ssl.primary.CRLfile = data->set.str[STRING_SSL_CRLFILE_ORIG];
++ data->set.proxy_ssl.primary.CRLfile =
++ data->set.str[STRING_SSL_CRLFILE_PROXY];
+ data->set.ssl.cert = data->set.str[STRING_CERT_ORIG];
+ data->set.proxy_ssl.cert = data->set.str[STRING_CERT_PROXY];
+ data->set.ssl.cert_type = data->set.str[STRING_CERT_TYPE_ORIG];
+@@ -3609,10 +3610,12 @@ static CURLcode create_conn(struct Curl_easy *data,
+ data->set.ssl.primary.clientcert = data->set.str[STRING_CERT_ORIG];
+ data->set.proxy_ssl.primary.clientcert = data->set.str[STRING_CERT_PROXY];
+ #ifdef USE_TLS_SRP
+- data->set.ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG];
+- data->set.proxy_ssl.username = data->set.str[STRING_TLSAUTH_USERNAME_PROXY];
+- data->set.ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG];
+- data->set.proxy_ssl.password = data->set.str[STRING_TLSAUTH_PASSWORD_PROXY];
++ data->set.ssl.primary.username = data->set.str[STRING_TLSAUTH_USERNAME_ORIG];
++ data->set.proxy_ssl.primary.username =
++ data->set.str[STRING_TLSAUTH_USERNAME_PROXY];
++ data->set.ssl.primary.password = data->set.str[STRING_TLSAUTH_PASSWORD_ORIG];
++ data->set.proxy_ssl.primary.password =
++ data->set.str[STRING_TLSAUTH_PASSWORD_PROXY];
+ #endif
+
+ if(!Curl_clone_primary_ssl_config(&data->set.ssl.primary,
+diff --git a/lib/urldata.h b/lib/urldata.h
+index ab1b267..ad0ef8f 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -231,6 +231,13 @@ struct ssl_primary_config {
+ char *cipher_list; /* list of ciphers to use */
+ char *cipher_list13; /* list of TLS 1.3 cipher suites to use */
+ char *pinned_key;
++ char *CRLfile; /* CRL to check certificate revocation */
++ #ifdef USE_TLS_SRP
++ char *username; /* TLS username (for, e.g., SRP) */
++ char *password; /* TLS password (for, e.g., SRP) */
++ enum CURL_TLSAUTH authtype; /* TLS authentication type (default SRP) */
++ #endif
++ unsigned char ssl_options; /* the CURLOPT_SSL_OPTIONS bitmask */
+ BIT(verifypeer); /* set TRUE if this is desired */
+ BIT(verifyhost); /* set TRUE if CN/SAN must match hostname */
+ BIT(verifystatus); /* set TRUE if certificate status must be checked */
+@@ -240,7 +247,6 @@ struct ssl_primary_config {
+ struct ssl_config_data {
+ struct ssl_primary_config primary;
+ long certverifyresult; /* result from the certificate verification */
+- char *CRLfile; /* CRL to check certificate revocation */
+ curl_ssl_ctx_callback fsslctx; /* function to initialize ssl ctx */
+ void *fsslctxp; /* parameter for call back */
+ char *cert; /* client certificate file name */
+@@ -248,11 +254,6 @@ struct ssl_config_data {
+ char *key; /* private key file name */
+ char *key_type; /* format for private key (default: PEM) */
+ char *key_passwd; /* plain text private key password */
+-#ifdef USE_TLS_SRP
+- char *username; /* TLS username (for, e.g., SRP) */
+- char *password; /* TLS password (for, e.g., SRP) */
+- enum CURL_TLSAUTH authtype; /* TLS authentication type (default SRP) */
+-#endif
+ BIT(certinfo); /* gather lots of certificate info */
+ BIT(falsestart);
+ BIT(enable_beast); /* allow this flaw for interoperability's sake*/
+diff --git a/lib/vtls/gtls.c b/lib/vtls/gtls.c
+index 3d0758d..92c301c 100644
+--- a/lib/vtls/gtls.c
++++ b/lib/vtls/gtls.c
+@@ -581,9 +581,10 @@ gtls_connect_step1(struct connectdata *conn,
+ }
+
+ #ifdef USE_TLS_SRP
+- if((SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP) &&
++ if((SSL_SET_OPTION(primary.authtype) == CURL_TLSAUTH_SRP) &&
+ Curl_allow_auth_to_host(data)) {
+- infof(data, "Using TLS-SRP username: %s\n", SSL_SET_OPTION(username));
++ infof(data, "Using TLS-SRP username: %s\n",
++ SSL_SET_OPTION(primary.username));
+
+ rc = gnutls_srp_allocate_client_credentials(&BACKEND->srp_client_cred);
+ if(rc != GNUTLS_E_SUCCESS) {
+@@ -593,8 +594,8 @@ gtls_connect_step1(struct connectdata *conn,
+ }
+
+ rc = gnutls_srp_set_client_credentials(BACKEND->srp_client_cred,
+- SSL_SET_OPTION(username),
+- SSL_SET_OPTION(password));
++ SSL_SET_OPTION(primary.username),
++ SSL_SET_OPTION(primary.password));
+ if(rc != GNUTLS_E_SUCCESS) {
+ failf(data, "gnutls_srp_set_client_cred() failed: %s",
+ gnutls_strerror(rc));
+@@ -648,19 +649,19 @@ gtls_connect_step1(struct connectdata *conn,
+ }
+ #endif
+
+- if(SSL_SET_OPTION(CRLfile)) {
++ if(SSL_SET_OPTION(primary.CRLfile)) {
+ /* set the CRL list file */
+ rc = gnutls_certificate_set_x509_crl_file(BACKEND->cred,
+- SSL_SET_OPTION(CRLfile),
++ SSL_SET_OPTION(primary.CRLfile),
+ GNUTLS_X509_FMT_PEM);
+ if(rc < 0) {
+ failf(data, "error reading crl file %s (%s)",
+- SSL_SET_OPTION(CRLfile), gnutls_strerror(rc));
++ SSL_SET_OPTION(primary.CRLfile), gnutls_strerror(rc));
+ return CURLE_SSL_CRL_BADFILE;
+ }
+ else
+ infof(data, "found %d CRL in %s\n",
+- rc, SSL_SET_OPTION(CRLfile));
++ rc, SSL_SET_OPTION(primary.CRLfile));
+ }
+
+ /* Initialize TLS session as a client */
+@@ -879,7 +880,7 @@ gtls_connect_step1(struct connectdata *conn,
+
+ #ifdef USE_TLS_SRP
+ /* put the credentials to the current session */
+- if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP) {
++ if(SSL_SET_OPTION(primary.authtype) == CURL_TLSAUTH_SRP) {
+ rc = gnutls_credentials_set(session, GNUTLS_CRD_SRP,
+ BACKEND->srp_client_cred);
+ if(rc != GNUTLS_E_SUCCESS) {
+@@ -1061,8 +1062,8 @@ gtls_connect_step3(struct connectdata *conn,
+ SSL_CONN_CONFIG(verifyhost) ||
+ SSL_CONN_CONFIG(issuercert)) {
+ #ifdef USE_TLS_SRP
+- if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP
+- && SSL_SET_OPTION(username) != NULL
++ if(SSL_SET_OPTION(primary.authtype) == CURL_TLSAUTH_SRP
++ && SSL_SET_OPTION(primary.username) != NULL
+ && !SSL_CONN_CONFIG(verifypeer)
+ && gnutls_cipher_get(session)) {
+ /* no peer cert, but auth is ok if we have SRP user and cipher and no
+@@ -1116,7 +1117,8 @@ gtls_connect_step3(struct connectdata *conn,
+ failf(data, "server certificate verification failed. CAfile: %s "
+ "CRLfile: %s", SSL_CONN_CONFIG(CAfile) ? SSL_CONN_CONFIG(CAfile):
+ "none",
+- SSL_SET_OPTION(CRLfile)?SSL_SET_OPTION(CRLfile):"none");
++ SSL_SET_OPTION(primary.CRLfile) ?
++ SSL_SET_OPTION(primary.CRLfile) : "none");
+ return CURLE_PEER_FAILED_VERIFICATION;
+ }
+ else
+@@ -1703,8 +1705,8 @@ static int Curl_gtls_shutdown(struct connectdata *conn, int sockindex)
+ gnutls_certificate_free_credentials(BACKEND->cred);
+
+ #ifdef USE_TLS_SRP
+- if(SSL_SET_OPTION(authtype) == CURL_TLSAUTH_SRP
+- && SSL_SET_OPTION(username) != NULL)
++ if(SSL_SET_OPTION(primary.authtype) == CURL_TLSAUTH_SRP
++ && SSL_SET_OPTION(primary.username) != NULL)
+ gnutls_srp_free_client_credentials(BACKEND->srp_client_cred);
+ #endif
+
+diff --git a/lib/vtls/mbedtls.c b/lib/vtls/mbedtls.c
+index 19df847..62d2b00 100644
+--- a/lib/vtls/mbedtls.c
++++ b/lib/vtls/mbedtls.c
+@@ -245,7 +245,7 @@ mbed_connect_step1(struct connectdata *conn,
+ const bool verifypeer = SSL_CONN_CONFIG(verifypeer);
+ const char * const ssl_capath = SSL_CONN_CONFIG(CApath);
+ char * const ssl_cert = SSL_SET_OPTION(cert);
+- const char * const ssl_crlfile = SSL_SET_OPTION(CRLfile);
++ const char * const ssl_crlfile = SSL_SET_OPTION(primary.CRLfile);
+ const char * const hostname = SSL_IS_PROXY() ? conn->http_proxy.host.name :
+ conn->host.name;
+ const long int port = SSL_IS_PROXY() ? conn->port : conn->remote_port;
+diff --git a/lib/vtls/nss.c b/lib/vtls/nss.c
+index 86102f7..62fd7a2 100644
+--- a/lib/vtls/nss.c
++++ b/lib/vtls/nss.c
+@@ -1955,13 +1955,13 @@ static CURLcode nss_setup_connect(struct connectdata *conn, int sockindex)
+ }
+ }
+
+- if(SSL_SET_OPTION(CRLfile)) {
+- const CURLcode rv = nss_load_crl(SSL_SET_OPTION(CRLfile));
++ if(SSL_SET_OPTION(primary.CRLfile)) {
++ const CURLcode rv = nss_load_crl(SSL_SET_OPTION(primary.CRLfile));
+ if(rv) {
+ result = rv;
+ goto error;
+ }
+- infof(data, " CRLfile: %s\n", SSL_SET_OPTION(CRLfile));
++ infof(data, " CRLfile: %s\n", SSL_SET_OPTION(primary.CRLfile));
+ }
+
+ if(SSL_SET_OPTION(cert)) {
+diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c
+index a14cecc..ec5a8f5 100644
+--- a/lib/vtls/openssl.c
++++ b/lib/vtls/openssl.c
+@@ -2454,14 +2454,14 @@ static CURLcode ossl_connect_step1(struct connectdata *conn, int sockindex)
+ &data->set.proxy_ssl.certverifyresult : &data->set.ssl.certverifyresult;
+ const long int ssl_version = SSL_CONN_CONFIG(version);
+ #ifdef USE_TLS_SRP
+- const enum CURL_TLSAUTH ssl_authtype = SSL_SET_OPTION(authtype);
++ const enum CURL_TLSAUTH ssl_authtype = SSL_SET_OPTION(primary.authtype);
+ #endif
+ char * const ssl_cert = SSL_SET_OPTION(cert);
+ const char * const ssl_cert_type = SSL_SET_OPTION(cert_type);
+ const char * const ssl_cafile = SSL_CONN_CONFIG(CAfile);
+ const char * const ssl_capath = SSL_CONN_CONFIG(CApath);
+ const bool verifypeer = SSL_CONN_CONFIG(verifypeer);
+- const char * const ssl_crlfile = SSL_SET_OPTION(CRLfile);
++ const char * const ssl_crlfile = SSL_SET_OPTION(primary.CRLfile);
+ char error_buffer[256];
+
+ DEBUGASSERT(ssl_connect_1 == connssl->connecting_state);
+@@ -2741,15 +2741,15 @@ static CURLcode ossl_connect_step1(struct connectdata *conn, int sockindex)
+ #ifdef USE_TLS_SRP
+ if((ssl_authtype == CURL_TLSAUTH_SRP) &&
+ Curl_allow_auth_to_host(data)) {
+- char * const ssl_username = SSL_SET_OPTION(username);
+-
++ char * const ssl_username = SSL_SET_OPTION(primary.username);
++ char * const ssl_password = SSL_SET_OPTION(primary.password);
+ infof(data, "Using TLS-SRP username: %s\n", ssl_username);
+
+ if(!SSL_CTX_set_srp_username(BACKEND->ctx, ssl_username)) {
+ failf(data, "Unable to set SRP user name");
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
+- if(!SSL_CTX_set_srp_password(BACKEND->ctx, SSL_SET_OPTION(password))) {
++ if(!SSL_CTX_set_srp_password(BACKEND->ctx, ssl_password)) {
+ failf(data, "failed setting SRP password");
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ }
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index e38f74e..e8cb70f 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -89,6 +89,7 @@ Curl_ssl_config_matches(struct ssl_primary_config* data,
+ {
+ if((data->version == needle->version) &&
+ (data->version_max == needle->version_max) &&
++ (data->ssl_options == needle->ssl_options) &&
+ (data->verifypeer == needle->verifypeer) &&
+ (data->verifyhost == needle->verifyhost) &&
+ (data->verifystatus == needle->verifystatus) &&
diff --git a/meta/recipes-support/curl/curl/CVE-2022-27782-2.patch b/meta/recipes-support/curl/curl/CVE-2022-27782-2.patch
new file mode 100644
index 0000000000..3d56025210
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-27782-2.patch
@@ -0,0 +1,71 @@
+From 0a115a8903dffc7f723d1d4d71fb821d69eb8761 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 9 May 2022 23:13:53 +0200
+Subject: [PATCH] url: check SSH config match on connection reuse
+
+CVE-2022-27782
+
+Reported-by: Harry Sintonen
+Bug: https://curl.se/docs/CVE-2022-27782.html
+Closes #8825
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/1645e9b44505abd5cbaf65da5282c3f33b5924a5]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/url.c | 11 +++++++++++
+ lib/vssh/ssh.h | 6 +++---
+ 2 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/lib/url.c b/lib/url.c
+index 6518be9..8da0245 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1027,6 +1027,12 @@ static void prune_dead_connections(struct Curl_easy *data)
+ }
+ }
+
++static bool ssh_config_matches(struct connectdata *one,
++ struct connectdata *two)
++{
++ return (Curl_safecmp(one->proto.sshc.rsa, two->proto.sshc.rsa) &&
++ Curl_safecmp(one->proto.sshc.rsa_pub, two->proto.sshc.rsa_pub));
++}
+ /*
+ * Given one filled in connection struct (named needle), this function should
+ * detect if there already is one that has all the significant details
+@@ -1260,6 +1266,11 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
++ if(get_protocol_family(needle->handler->protocol) == PROTO_FAMILY_SSH) {
++ if(!ssh_config_matches(needle, check))
++ continue;
++ }
++
+ if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) ||
+ needle->bits.tunnel_proxy) {
+ /* The requested connection does not use a HTTP proxy or it uses SSL or
+diff --git a/lib/vssh/ssh.h b/lib/vssh/ssh.h
+index 0d4ee52..8f2632e 100644
+--- a/lib/vssh/ssh.h
++++ b/lib/vssh/ssh.h
+@@ -7,7 +7,7 @@
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+- * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
++ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+@@ -120,8 +120,8 @@ struct ssh_conn {
+
+ /* common */
+ const char *passphrase; /* pass-phrase to use */
+- char *rsa_pub; /* path name */
+- char *rsa; /* path name */
++ char *rsa_pub; /* strdup'ed public key file */
++ char *rsa; /* strdup'ed private key file */
+ bool authed; /* the connection has been authenticated fine */
+ sshstate state; /* always use ssh.c:state() to change state! */
+ sshstate nextstate; /* the state to goto after stopping */
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32206.patch b/meta/recipes-support/curl/curl/CVE-2022-32206.patch
new file mode 100644
index 0000000000..3d76aeb43d
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32206.patch
@@ -0,0 +1,52 @@
+From 25e7be39be5f8ed696b6085ced9cf6c17e6128f4 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 16 May 2022 16:28:13 +0200
+Subject: [PATCH] content_encoding: return error on too many compression steps
+
+The max allowed steps is arbitrarily set to 5.
+
+Bug: https://curl.se/docs/CVE-2022-32206.html
+CVE-2022-32206
+Reported-by: Harry Sintonen
+Closes #9049
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/3a09fbb7f264c67c43]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/content_encoding.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/lib/content_encoding.c b/lib/content_encoding.c
+index 6d47537..91e621f 100644
+--- a/lib/content_encoding.c
++++ b/lib/content_encoding.c
+@@ -934,6 +934,9 @@ static const content_encoding *find_encoding(const char *name, size_t len)
+ return NULL;
+ }
+
++/* allow no more than 5 "chained" compression steps */
++#define MAX_ENCODE_STACK 5
++
+ /* Set-up the unencoding stack from the Content-Encoding header value.
+ * See RFC 7231 section 3.1.2.2. */
+ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+@@ -941,6 +944,7 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+ {
+ struct Curl_easy *data = conn->data;
+ struct SingleRequest *k = &data->req;
++ int counter = 0;
+
+ do {
+ const char *name;
+@@ -975,6 +979,11 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+ if(!encoding)
+ encoding = &error_encoding; /* Defer error at stack use. */
+
++ if(++counter >= MAX_ENCODE_STACK) {
++ failf(data, "Reject response due to %u content encodings",
++ counter);
++ return CURLE_BAD_CONTENT_ENCODING;
++ }
+ /* Stack the unencoding stage. */
+ writer = new_unencoding_writer(conn, encoding, k->writer_stack);
+ if(!writer)
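In other words, the fix simply counts Content-Encoding tokens while the decode chain is built and rejects the response once the count reaches MAX_ENCODE_STACK. A standalone sketch of that counting logic (simplified; the real parser also handles whitespace and token validation):

#include <string.h>

#define MAX_ENCODE_STACK 5

/* Return the number of comma-separated encodings, or -1 if there are
 * too many chained compression steps. */
int count_encodings(const char *header)
{
  int counter = 0;
  const char *p = header;
  while(*p) {
    if(++counter >= MAX_ENCODE_STACK)
      return -1;
    p = strchr(p, ',');
    if(!p)
      break;
    p++;
  }
  return counter;
}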
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32207.patch b/meta/recipes-support/curl/curl/CVE-2022-32207.patch
new file mode 100644
index 0000000000..f75aaecd64
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32207.patch
@@ -0,0 +1,284 @@
+From af92181055d7d64dfc0bc9d5a13c8b98af3196be Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Wed, 25 May 2022 10:09:53 +0200
+Subject: [PATCH] fopen: add Curl_fopen() for better overwriting of files
+
+Bug: https://curl.se/docs/CVE-2022-32207.html
+CVE-2022-32207
+Reported-by: Harry Sintonen
+Closes #9050
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/20f9dd6bae50b]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ CMakeLists.txt | 1 +
+ configure.ac | 1 +
+ lib/Makefile.inc | 4 +-
+ lib/cookie.c | 19 ++-----
+ lib/curl_config.h.cmake | 3 ++
+ lib/fopen.c | 113 ++++++++++++++++++++++++++++++++++++++++
+ lib/fopen.h | 30 +++++++++++
+ 7 files changed, 155 insertions(+), 16 deletions(-)
+ create mode 100644 lib/fopen.c
+ create mode 100644 lib/fopen.h
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 73b053b..cc587b0 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -869,6 +869,7 @@ elseif(HAVE_LIBSOCKET)
+ set(CMAKE_REQUIRED_LIBRARIES socket)
+ endif()
+
++check_symbol_exists(fchmod "${CURL_INCLUDES}" HAVE_FCHMOD)
+ check_symbol_exists(basename "${CURL_INCLUDES}" HAVE_BASENAME)
+ check_symbol_exists(socket "${CURL_INCLUDES}" HAVE_SOCKET)
+ check_symbol_exists(select "${CURL_INCLUDES}" HAVE_SELECT)
+diff --git a/configure.ac b/configure.ac
+index d090622..7071077 100755
+--- a/configure.ac
++++ b/configure.ac
+@@ -4059,6 +4059,7 @@ AC_CHECK_DECLS([getpwuid_r], [], [AC_DEFINE(HAVE_DECL_GETPWUID_R_MISSING, 1, "Se
+
+
+ AC_CHECK_FUNCS([fnmatch \
++ fchmod \
+ geteuid \
+ getpass_r \
+ getppid \
+diff --git a/lib/Makefile.inc b/lib/Makefile.inc
+index 46ded90..79307d8 100644
+--- a/lib/Makefile.inc
++++ b/lib/Makefile.inc
+@@ -63,7 +63,7 @@ LIB_CFILES = file.c timeval.c base64.c hostip.c progress.c formdata.c \
+ curl_multibyte.c hostcheck.c conncache.c dotdot.c \
+ x509asn1.c http2.c smb.c curl_endian.c curl_des.c system_win32.c \
+ mime.c sha256.c setopt.c curl_path.c curl_ctype.c curl_range.c psl.c \
+- doh.c urlapi.c curl_get_line.c altsvc.c socketpair.c rename.c
++ doh.c urlapi.c curl_get_line.c altsvc.c socketpair.c rename.c fopen.c
+
+ LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
+ formdata.h cookie.h http.h sendf.h ftp.h url.h dict.h if2ip.h \
+@@ -84,7 +84,7 @@ LIB_HFILES = arpa_telnet.h netrc.h file.h timeval.h hostip.h progress.h \
+ x509asn1.h http2.h sigpipe.h smb.h curl_endian.h curl_des.h \
+ curl_printf.h system_win32.h rand.h mime.h curl_sha256.h setopt.h \
+ curl_path.h curl_ctype.h curl_range.h psl.h doh.h urlapi-int.h \
+- curl_get_line.h altsvc.h quic.h socketpair.h rename.h
++ curl_get_line.h altsvc.h quic.h socketpair.h rename.h fopen.h
+
+ LIB_RCFILES = libcurl.rc
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index 68054e1..a9ad20a 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -97,8 +97,8 @@ Example set of cookies:
+ #include "curl_memrchr.h"
+ #include "inet_pton.h"
+ #include "parsedate.h"
+-#include "rand.h"
+ #include "rename.h"
++#include "fopen.h"
+
+ /* The last 3 #include files should be in this order */
+ #include "curl_printf.h"
+@@ -1524,18 +1524,9 @@ static int cookie_output(struct Curl_easy *data,
+ use_stdout = TRUE;
+ }
+ else {
+- unsigned char randsuffix[9];
+-
+- if(Curl_rand_hex(data, randsuffix, sizeof(randsuffix)))
+- return 2;
+-
+- tempstore = aprintf("%s.%s.tmp", filename, randsuffix);
+- if(!tempstore)
+- return 1;
+-
+- out = fopen(tempstore, FOPEN_WRITETEXT);
+- if(!out)
+- goto error;
++ error = Curl_fopen(data, filename, &out, &tempstore);
++ if(error)
++ goto error;
+ }
+
+ fputs("# Netscape HTTP Cookie File\n"
+@@ -1581,7 +1572,7 @@ static int cookie_output(struct Curl_easy *data,
+ if(!use_stdout) {
+ fclose(out);
+ out = NULL;
+- if(Curl_rename(tempstore, filename)) {
++ if(tempstore && Curl_rename(tempstore, filename)) {
+ unlink(tempstore);
+ goto error;
+ }
+diff --git a/lib/curl_config.h.cmake b/lib/curl_config.h.cmake
+index 98cdf51..fe43751 100644
+--- a/lib/curl_config.h.cmake
++++ b/lib/curl_config.h.cmake
+@@ -124,6 +124,9 @@
+ /* Define to 1 if you have the <assert.h> header file. */
+ #cmakedefine HAVE_ASSERT_H 1
+
++/* Define to 1 if you have the `fchmod' function. */
++#cmakedefine HAVE_FCHMOD 1
++
+ /* Define to 1 if you have the `basename' function. */
+ #cmakedefine HAVE_BASENAME 1
+
+diff --git a/lib/fopen.c b/lib/fopen.c
+new file mode 100644
+index 0000000..ad3691b
+--- /dev/null
++++ b/lib/fopen.c
+@@ -0,0 +1,113 @@
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++#include "curl_setup.h"
++
++#if !defined(CURL_DISABLE_COOKIES) || !defined(CURL_DISABLE_ALTSVC) || \
++ !defined(CURL_DISABLE_HSTS)
++
++#ifdef HAVE_FCNTL_H
++#include <fcntl.h>
++#endif
++
++#include "urldata.h"
++#include "rand.h"
++#include "fopen.h"
++/* The last 3 #include files should be in this order */
++#include "curl_printf.h"
++#include "curl_memory.h"
++#include "memdebug.h"
++
++/*
++ * Curl_fopen() opens a file for writing with a temp name, to be renamed
++ * to the final name when completed. If there is an existing file using this
++ * name at the time of the open, this function will clone the mode from that
++ * file. if 'tempname' is non-NULL, it needs a rename after the file is
++ * written.
++ */
++CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
++ FILE **fh, char **tempname)
++{
++ CURLcode result = CURLE_WRITE_ERROR;
++ unsigned char randsuffix[9];
++ char *tempstore = NULL;
++ struct_stat sb;
++ int fd = -1;
++ *tempname = NULL;
++
++ if(stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode)) {
++ /* a non-regular file, fallback to direct fopen() */
++ *fh = fopen(filename, FOPEN_WRITETEXT);
++ if(*fh)
++ return CURLE_OK;
++ goto fail;
++ }
++
++ result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
++ if(result)
++ goto fail;
++
++ tempstore = aprintf("%s.%s.tmp", filename, randsuffix);
++ if(!tempstore) {
++ result = CURLE_OUT_OF_MEMORY;
++ goto fail;
++ }
++
++ result = CURLE_WRITE_ERROR;
++ fd = open(tempstore, O_WRONLY | O_CREAT | O_EXCL, 0600);
++ if(fd == -1)
++ goto fail;
++
++#ifdef HAVE_FCHMOD
++ {
++ struct_stat nsb;
++ if((fstat(fd, &nsb) != -1) &&
++ (nsb.st_uid == sb.st_uid) && (nsb.st_gid == sb.st_gid)) {
++ /* if the user and group are the same, clone the original mode */
++ if(fchmod(fd, sb.st_mode) == -1)
++ goto fail;
++ }
++ }
++#endif
++
++ *fh = fdopen(fd, FOPEN_WRITETEXT);
++ if(!*fh)
++ goto fail;
++
++ *tempname = tempstore;
++ return CURLE_OK;
++
++fail:
++ if(fd != -1) {
++ close(fd);
++ unlink(tempstore);
++ }
++
++ free(tempstore);
++
++ *tempname = NULL;
++ return result;
++}
++
++#endif /* ! disabled */
+diff --git a/lib/fopen.h b/lib/fopen.h
+new file mode 100644
+index 0000000..289e55f
+--- /dev/null
++++ b/lib/fopen.h
+@@ -0,0 +1,30 @@
++#ifndef HEADER_CURL_FOPEN_H
++#define HEADER_CURL_FOPEN_H
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) 1998 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
++ FILE **fh, char **tempname);
++
++#endif
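The new helper follows a classic safe-overwrite pattern: create a 0600 temp file with O_EXCL, clone the old file's mode, write, then rename over the target only on success. A standalone sketch of the same pattern (simplified: fixed ".tmp" suffix instead of a random one, no owner/group check):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int safe_overwrite(const char *path, const char *contents)
{
  char tmp[4096];
  struct stat sb;
  int clone_mode = (stat(path, &sb) == 0 && S_ISREG(sb.st_mode));
  int fd;
  int ok;
  FILE *out;

  snprintf(tmp, sizeof(tmp), "%s.tmp", path);
  fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600);
  if(fd == -1)
    return -1;
  if(clone_mode)
    (void)fchmod(fd, sb.st_mode);   /* keep the original permissions */
  out = fdopen(fd, "w");
  if(!out) {
    close(fd);
    unlink(tmp);
    return -1;
  }
  ok = (fputs(contents, out) != EOF);
  if(fclose(out) == EOF)            /* fclose() flushes and closes fd */
    ok = 0;
  if(!ok || rename(tmp, path) != 0) {
    unlink(tmp);
    return -1;
  }
  return 0;
}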
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32208.patch b/meta/recipes-support/curl/curl/CVE-2022-32208.patch
new file mode 100644
index 0000000000..2939314d09
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32208.patch
@@ -0,0 +1,72 @@
+From 3b90f0b2a7a84645acce151c86b40d25b5de6615 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Jun 2022 09:27:24 +0200
+Subject: [PATCH] krb5: return error properly on decode errors
+
+Bug: https://curl.se/docs/CVE-2022-32208.html
+CVE-2022-32208
+Reported-by: Harry Sintonen
+Closes #9051
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/6ecdf5136b52af7]
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/krb5.c | 5 +----
+ lib/security.c | 13 ++++++++++---
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/lib/krb5.c b/lib/krb5.c
+index f50287a..5b77e35 100644
+--- a/lib/krb5.c
++++ b/lib/krb5.c
+@@ -86,11 +86,8 @@ krb5_decode(void *app_data, void *buf, int len,
+ enc.value = buf;
+ enc.length = len;
+ maj = gss_unwrap(&min, *context, &enc, &dec, NULL, NULL);
+- if(maj != GSS_S_COMPLETE) {
+- if(len >= 4)
+- strcpy(buf, "599 ");
++ if(maj != GSS_S_COMPLETE)
+ return -1;
+- }
+
+ memcpy(buf, dec.value, dec.length);
+ len = curlx_uztosi(dec.length);
+diff --git a/lib/security.c b/lib/security.c
+index fbfa707..3542210 100644
+--- a/lib/security.c
++++ b/lib/security.c
+@@ -192,6 +192,7 @@ static CURLcode read_data(struct connectdata *conn,
+ {
+ int len;
+ CURLcode result;
++ int nread;
+
+ result = socket_read(fd, &len, sizeof(len));
+ if(result)
+@@ -200,7 +201,10 @@ static CURLcode read_data(struct connectdata *conn,
+ if(len) {
+ /* only realloc if there was a length */
+ len = ntohl(len);
+- buf->data = Curl_saferealloc(buf->data, len);
++ if(len > CURL_MAX_INPUT_LENGTH)
++ len = 0;
++ else
++ buf->data = Curl_saferealloc(buf->data, len);
+ }
+ if(!len || !buf->data)
+ return CURLE_OUT_OF_MEMORY;
+@@ -208,8 +212,11 @@ static CURLcode read_data(struct connectdata *conn,
+ result = socket_read(fd, buf->data, len);
+ if(result)
+ return result;
+- buf->size = conn->mech->decode(conn->app_data, buf->data, len,
+- conn->data_prot, conn);
++ nread = buf->size = conn->mech->decode(conn->app_data, buf->data, len,
++ conn->data_prot, conn);
++ if(nread < 0)
++ return CURLE_RECV_ERROR;
++ buf->size = (size_t)nread;
+ buf->index = 0;
+ return CURLE_OK;
+ }
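Two small hardening steps are at work above: the attacker-controlled length is bounded before any (re)allocation, and a negative decode result is reported as an error instead of being stored into an unsigned size. A standalone sketch of the pattern (the constant and callback are placeholders, not curl symbols):

#include <stdlib.h>

#define MAX_INPUT_LENGTH 8000000   /* placeholder bound for the demo */

int read_block_demo(long len, long (*decode)(char *buf, long len),
                    char **out, size_t *outlen)
{
  char *buf;
  long nread;

  if(len <= 0 || len > MAX_INPUT_LENGTH)
    return -1;                     /* reject absurd lengths up front */
  buf = malloc((size_t)len);
  if(!buf)
    return -1;
  nread = decode(buf, len);
  if(nread < 0) {                  /* error must not become a huge size_t */
    free(buf);
    return -1;
  }
  *out = buf;
  *outlen = (size_t)nread;
  return 0;
}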
diff --git a/meta/recipes-support/curl/curl/CVE-2022-32221.patch b/meta/recipes-support/curl/curl/CVE-2022-32221.patch
new file mode 100644
index 0000000000..8e662abd3a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-32221.patch
@@ -0,0 +1,29 @@
+From 75c04a3e75e8e3025a17ca3033ca307da9691cd0 Mon Sep 17 00:00:00 2001
+From: Vivek Kumbhar <vkumbhar@mvista.com>
+Date: Fri, 11 Nov 2022 10:49:58 +0530
+Subject: [PATCH] CVE-2022-32221
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/a64e3e59938abd7d6]
+CVE: CVE-2022-32221
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+setopt: when POST is set, reset the 'upload' field.
+---
+ lib/setopt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/lib/setopt.c b/lib/setopt.c
+index bebb2e4..4d96f6b 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -486,6 +486,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ }
+ else
+ data->set.httpreq = HTTPREQ_GET;
++ data->set.upload = FALSE;
+ break;
+
+ case CURLOPT_COPYPOSTFIELDS:
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2022-35252.patch b/meta/recipes-support/curl/curl/CVE-2022-35252.patch
new file mode 100644
index 0000000000..a5160c01f4
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-35252.patch
@@ -0,0 +1,72 @@
+From c9212bdb21f0cc90a1a60dfdbb716deefe78fd40 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 29 Aug 2022 00:09:17 +0200
+Subject: [PATCH] cookie: reject cookies with "control bytes"
+
+Rejects 0x01 - 0x1f (except 0x09) plus 0x7f
+
+Reported-by: Axel Chong
+
+Bug: https://curl.se/docs/CVE-2022-35252.html
+
+CVE-2022-35252
+
+Closes #9381
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/8dfc93e573ca740544a2d79ebb]
+
+Signed-off-by: Robert Joslyn <robert.joslyn@redrectangle.org>
+---
+ lib/cookie.c | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index a9ad20a..66c7715 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -412,6 +412,30 @@ static bool bad_domain(const char *domain)
+ return !strchr(domain, '.') && !strcasecompare(domain, "localhost");
+ }
+
++/*
++ RFC 6265 section 4.1.1 says a server should accept this range:
++
++ cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
++
++ But Firefox and Chrome as of June 2022 accept space, comma and double-quotes
++ fine. The prime reason for filtering out control bytes is that some HTTP
++ servers return 400 for requests that contain such.
++*/
++static int invalid_octets(const char *p)
++{
++ /* Reject all bytes \x01 - \x1f (*except* \x09, TAB) + \x7f */
++ static const char badoctets[] = {
++ "\x01\x02\x03\x04\x05\x06\x07\x08\x0a"
++ "\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14"
++ "\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f"
++ };
++ size_t vlen, len;
++ /* scan for all the octets that are *not* in cookie-octet */
++ len = strcspn(p, badoctets);
++ vlen = strlen(p);
++ return (len != vlen);
++}
++
+ /****************************************************************************
+ *
+ * Curl_cookie_add()
+@@ -558,6 +582,11 @@ Curl_cookie_add(struct Curl_easy *data,
+ badcookie = TRUE;
+ break;
+ }
++ if(invalid_octets(whatptr) || invalid_octets(name)) {
++ infof(data, "invalid octets in name/value, cookie dropped");
++ badcookie = TRUE;
++ break;
++ }
+ }
+ else if(!len) {
+ /* this was a "<name>=" with no content, and we must allow
+--
+2.35.1
+
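For reference, the strcspn()-based filter added above can be exercised on its own. This standalone sketch (not curl code; the function name is made up) shows which values now cause a cookie to be dropped:

    #include <stdio.h>
    #include <string.h>

    /* same rejection rule as the invalid_octets() helper in the patch */
    static int has_control_bytes(const char *p)
    {
      static const char bad[] =
        "\x01\x02\x03\x04\x05\x06\x07\x08\x0a"
        "\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14"
        "\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f";
      return strcspn(p, bad) != strlen(p);
    }

    int main(void)
    {
      printf("%d\n", has_control_bytes("abc123"));    /* 0: cookie kept    */
      printf("%d\n", has_control_bytes("abc\x07x"));  /* 1: cookie dropped */
      return 0;
    }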
diff --git a/meta/recipes-support/curl/curl/CVE-2022-35260.patch b/meta/recipes-support/curl/curl/CVE-2022-35260.patch
new file mode 100644
index 0000000000..476c996b0a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-35260.patch
@@ -0,0 +1,68 @@
+From 3ff3989ec53d9ddcf4bdd99f5d5788dd87486768 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 4 Oct 2022 14:37:24 +0200
+Subject: [PATCH] netrc: replace fgets with Curl_get_line
+
+Upstream-Status: Backport
+CVE: CVE-2022-35260
+Reference to upstream patch: https://github.com/curl/curl/commit/c97ec984fb2bc919a3aa863e0476dffa377b184c
+
+Make the parser only accept complete lines and avoid problems with
+overly long lines.
+
+Reported-by: Hiroki Kurosawa
+
+Closes #9789
+---
+ lib/curl_get_line.c | 4 ++--
+ lib/netrc.c | 5 +++--
+ 2 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/lib/curl_get_line.c b/lib/curl_get_line.c
+index c4194851ae09..4b9eea9e631c 100644
+--- a/lib/curl_get_line.c
++++ b/lib/curl_get_line.c
+@@ -28,8 +28,8 @@
+ #include "memdebug.h"
+
+ /*
+- * get_line() makes sure to only return complete whole lines that fit in 'len'
+- * bytes and end with a newline.
++ * Curl_get_line() makes sure to only return complete whole lines that fit in
++ * 'len' bytes and end with a newline.
+ */
+ char *Curl_get_line(char *buf, int len, FILE *input)
+ {
+diff --git a/lib/netrc.c b/lib/netrc.c
+index 1c9da31993c9..93239132c9d8 100644
+--- a/lib/netrc.c
++++ b/lib/netrc.c
+@@ -31,6 +31,7 @@
+ #include "netrc.h"
+ #include "strtok.h"
+ #include "strcase.h"
++#include "curl_get_line.h"
+
+ /* The last 3 #include files should be in this order */
+ #include "curl_printf.h"
+@@ -83,7 +84,7 @@ static int parsenetrc(const char *host,
+ char netrcbuffer[4096];
+ int netrcbuffsize = (int)sizeof(netrcbuffer);
+
+- while(!done && fgets(netrcbuffer, netrcbuffsize, file)) {
++ while(!done && Curl_get_line(netrcbuffer, netrcbuffsize, file)) {
+ tok = strtok_r(netrcbuffer, " \t\n", &tok_buf);
+ if(tok && *tok == '#')
+ /* treat an initial hash as a comment line */
+@@ -169,7 +170,7 @@ static int parsenetrc(const char *host,
+
+ tok = strtok_r(NULL, " \t\n", &tok_buf);
+ } /* while(tok) */
+- } /* while fgets() */
++ } /* while Curl_get_line() */
+
+ out:
+ if(!retcode) {
+--
+2.34.1
+
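The point of switching to Curl_get_line() is that the .netrc parser only ever sees whole lines, so an overly long line cannot be handed back in truncated pieces and re-tokenized. A rough standalone approximation of that "complete lines only" behaviour (this is a sketch, not the actual Curl_get_line() implementation):

    #include <stdio.h>
    #include <string.h>

    /* return a line only if the whole line fits in buf; a longer line is
       discarded and nothing is returned for it, so parsing stops there,
       mirroring the while loop in netrc.c */
    static char *get_complete_line(char *buf, int len, FILE *input)
    {
      if(fgets(buf, len, input)) {
        if(strchr(buf, '\n'))
          return buf;
        for(;;) {                    /* swallow the rest of the long line */
          int c = getc(input);
          if(c == EOF || c == '\n')
            break;
        }
      }
      return NULL;
    }

    int main(void)
    {
      char line[64];
      while(get_complete_line(line, sizeof(line), stdin))
        fputs(line, stdout);
      return 0;
    }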
diff --git a/meta/recipes-support/curl/curl/CVE-2022-43552.patch b/meta/recipes-support/curl/curl/CVE-2022-43552.patch
new file mode 100644
index 0000000000..d729441454
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2022-43552.patch
@@ -0,0 +1,82 @@
+From 4f20188ac644afe174be6005ef4f6ffba232b8b2 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 19 Dec 2022 08:38:37 +0100
+Subject: [PATCH] smb/telnet: do not free the protocol struct in *_done()
+
+It is managed by the generic layer.
+
+Reported-by: Trail of Bits
+
+Closes #10112
+
+CVE: CVE-2022-43552
+Upstream-Status: Backport [https://github.com/curl/curl/commit/4f20188ac644afe174be6005ef4f6ffba232b8b2]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/smb.c | 14 ++------------
+ lib/telnet.c | 3 ---
+ 2 files changed, 2 insertions(+), 15 deletions(-)
+
+diff --git a/lib/smb.c b/lib/smb.c
+index 12f9925..8db3b27 100644
+--- a/lib/smb.c
++++ b/lib/smb.c
+@@ -61,8 +61,6 @@ static CURLcode smb_connect(struct connectdata *conn, bool *done);
+ static CURLcode smb_connection_state(struct connectdata *conn, bool *done);
+ static CURLcode smb_do(struct connectdata *conn, bool *done);
+ static CURLcode smb_request_state(struct connectdata *conn, bool *done);
+-static CURLcode smb_done(struct connectdata *conn, CURLcode status,
+- bool premature);
+ static CURLcode smb_disconnect(struct connectdata *conn, bool dead);
+ static int smb_getsock(struct connectdata *conn, curl_socket_t *socks);
+ static CURLcode smb_parse_url_path(struct connectdata *conn);
+@@ -74,7 +72,7 @@ const struct Curl_handler Curl_handler_smb = {
+ "SMB", /* scheme */
+ smb_setup_connection, /* setup_connection */
+ smb_do, /* do_it */
+- smb_done, /* done */
++ ZERO_NULL, /* done */
+ ZERO_NULL, /* do_more */
+ smb_connect, /* connect_it */
+ smb_connection_state, /* connecting */
+@@ -99,7 +97,7 @@ const struct Curl_handler Curl_handler_smbs = {
+ "SMBS", /* scheme */
+ smb_setup_connection, /* setup_connection */
+ smb_do, /* do_it */
+- smb_done, /* done */
++ ZERO_NULL, /* done */
+ ZERO_NULL, /* do_more */
+ smb_connect, /* connect_it */
+ smb_connection_state, /* connecting */
+@@ -919,14 +917,6 @@ static CURLcode smb_request_state(struct connectdata *conn, bool *done)
+ return CURLE_OK;
+ }
+
+-static CURLcode smb_done(struct connectdata *conn, CURLcode status,
+- bool premature)
+-{
+- (void) premature;
+- Curl_safefree(conn->data->req.protop);
+- return status;
+-}
+-
+ static CURLcode smb_disconnect(struct connectdata *conn, bool dead)
+ {
+ struct smb_conn *smbc = &conn->proto.smbc;
+diff --git a/lib/telnet.c b/lib/telnet.c
+index 3347ad6..e3b9208 100644
+--- a/lib/telnet.c
++++ b/lib/telnet.c
+@@ -1294,9 +1294,6 @@ static CURLcode telnet_done(struct connectdata *conn,
+
+ curl_slist_free_all(tn->telnet_vars);
+ tn->telnet_vars = NULL;
+-
+- Curl_safefree(conn->data->req.protop);
+-
+ return CURLE_OK;
+ }
+
+--
+2.25.1
+
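The underlying problem is an ownership one: the per-request protocol struct is allocated and released by the generic transfer layer, so a protocol handler freeing it again in its done() callback leaves a dangling pointer and a later double free. A generic illustration (not curl code):

    #include <stdlib.h>

    struct request {
      void *protocol_data;          /* owned by the generic layer */
    };

    static void handler_done(struct request *r)
    {
      /* wrong: the handler does not own protocol_data
       *   free(r->protocol_data);
       * correct: leave it alone and let the owner release it */
      (void)r;
    }

    static void generic_request_done(struct request *r)
    {
      handler_done(r);
      free(r->protocol_data);       /* single, owning free */
      r->protocol_data = NULL;
    }

    int main(void)
    {
      struct request r = { malloc(32) };
      generic_request_done(&r);
      return 0;
    }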
diff --git a/meta/recipes-support/curl/curl/CVE-2023-23916.patch b/meta/recipes-support/curl/curl/CVE-2023-23916.patch
new file mode 100644
index 0000000000..054615963e
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-23916.patch
@@ -0,0 +1,231 @@
+From 119fb187192a9ea13dc90d9d20c215fc82799ab9 Mon Sep 17 00:00:00 2001
+From: Patrick Monnerat <patrick@monnerat.net>
+Date: Mon, 13 Feb 2023 08:33:09 +0100
+Subject: [PATCH] content_encoding: do not reset stage counter for each header
+
+Test 418 verifies
+
+Closes #10492
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/119fb187192a9ea13dc90d9d20c215fc82799ab9]
+CVE: CVE-2023-23916
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/content_encoding.c | 7 +-
+ lib/urldata.h | 1 +
+ tests/data/Makefile.inc | 2 +-
+ tests/data/test418 | 152 ++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 157 insertions(+), 5 deletions(-)
+ create mode 100644 tests/data/test418
+
+diff --git a/lib/content_encoding.c b/lib/content_encoding.c
+index 91e621f..7e098a5 100644
+--- a/lib/content_encoding.c
++++ b/lib/content_encoding.c
+@@ -944,7 +944,6 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+ {
+ struct Curl_easy *data = conn->data;
+ struct SingleRequest *k = &data->req;
+- int counter = 0;
+
+ do {
+ const char *name;
+@@ -979,9 +978,9 @@ CURLcode Curl_build_unencoding_stack(struct connectdata *conn,
+ if(!encoding)
+ encoding = &error_encoding; /* Defer error at stack use. */
+
+- if(++counter >= MAX_ENCODE_STACK) {
+- failf(data, "Reject response due to %u content encodings",
+- counter);
++ if(k->writer_stack_depth++ >= MAX_ENCODE_STACK) {
++ failf(data, "Reject response due to more than %u content encodings",
++ MAX_ENCODE_STACK);
+ return CURLE_BAD_CONTENT_ENCODING;
+ }
+ /* Stack the unencoding stage. */
+diff --git a/lib/urldata.h b/lib/urldata.h
+index ad0ef8f..168f874 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -648,6 +648,7 @@ struct SingleRequest {
+ #ifndef CURL_DISABLE_DOH
+ struct dohdata doh; /* DoH specific data for this request */
+ #endif
++ unsigned char writer_stack_depth; /* Unencoding stack depth. */
+ BIT(header); /* incoming data has HTTP header */
+ BIT(content_range); /* set TRUE if Content-Range: was found */
+ BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index 60e8176..40de8bc 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -63,7 +63,7 @@ test350 test351 test352 test353 test354 test355 test356 test357 \
+ test393 test394 test395 \
+ \
+ test400 test401 test402 test403 test404 test405 test406 test407 test408 \
+-test409 \
++test409 test418 \
+ \
+ test490 test491 test492 \
+ \
+diff --git a/tests/data/test418 b/tests/data/test418
+new file mode 100644
+index 0000000..50e974e
+--- /dev/null
++++ b/tests/data/test418
+@@ -0,0 +1,152 @@
++<testcase>
++<info>
++<keywords>
++HTTP
++gzip
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++<data nocheck="yes">
++HTTP/1.1 200 OK
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++Transfer-Encoding: gzip
++
++-foo-
++</data>
++</reply>
++
++#
++# Client-side
++<client>
++<server>
++http
++</server>
++ <name>
++Response with multiple Transfer-Encoding headers
++ </name>
++ <command>
++http://%HOSTIP:%HTTPPORT/%TESTNUMBER -sS
++</command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++<protocol crlf="yes">
++GET /%TESTNUMBER HTTP/1.1
++Host: %HOSTIP:%HTTPPORT
++User-Agent: curl/%VERSION
++Accept: */*
++
++</protocol>
++
++# CURLE_BAD_CONTENT_ENCODING is 61
++<errorcode>
++61
++</errorcode>
++<stderr mode="text">
++curl: (61) Reject response due to more than 5 content encodings
++</stderr>
++</verify>
++</testcase>
+--
+2.25.1
+
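The key change is where the counter lives: a local that restarts at zero for every header line can never reach the limit, while a counter kept in the request state accumulates across all encoding headers of one response (which is what test418 above exercises). A small sketch of the corrected accounting (struct and function names are illustrative only):

    #define MAX_ENCODE_STACK 5

    struct request_state {
      unsigned char writer_stack_depth;   /* persists for the whole response */
    };

    /* called once per Transfer-Encoding/Content-Encoding header value */
    static int push_decoder(struct request_state *req)
    {
      if(req->writer_stack_depth++ >= MAX_ENCODE_STACK)
        return -1;                        /* too many: reject the response */
      /* ... stack one more decoder here ... */
      return 0;
    }

    int main(void)
    {
      struct request_state req = { 0 };
      int i;
      for(i = 0; i < 100; i++)
        if(push_decoder(&req) < 0)
          return 0;                       /* rejected on the sixth decoder */
      return 1;                           /* never reached with the fix */
    }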
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27533.patch b/meta/recipes-support/curl/curl/CVE-2023-27533.patch
new file mode 100644
index 0000000000..64ba135056
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27533.patch
@@ -0,0 +1,59 @@
+Backport of:
+
+From 538b1e79a6e7b0bb829ab4cecc828d32105d0684 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 6 Mar 2023 12:07:33 +0100
+Subject: [PATCH] telnet: only accept option arguments in ascii
+
+To avoid embedded telnet negotiation commands etc.
+
+Reported-by: Harry Sintonen
+Closes #10728
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/curl/tree/debian/patches/CVE-2023-27533.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/curl/curl/commit/538b1e79a6e7b0bb829ab4cecc828d32105d0684]
+CVE: CVE-2023-27533
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/telnet.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/lib/telnet.c
++++ b/lib/telnet.c
+@@ -815,6 +815,17 @@ static void printsub(struct Curl_easy *d
+ }
+ }
+
++static bool str_is_nonascii(const char *str)
++{
++ size_t len = strlen(str);
++ while(len--) {
++ if(*str & 0x80)
++ return TRUE;
++ str++;
++ }
++ return FALSE;
++}
++
+ static CURLcode check_telnet_options(struct connectdata *conn)
+ {
+ struct curl_slist *head;
+@@ -829,6 +840,8 @@ static CURLcode check_telnet_options(str
+ /* Add the user name as an environment variable if it
+ was given on the command line */
+ if(conn->bits.user_passwd) {
++ if(str_is_nonascii(data->conn->user))
++ return CURLE_BAD_FUNCTION_ARGUMENT;
+ msnprintf(option_arg, sizeof(option_arg), "USER,%s", conn->user);
+ beg = curl_slist_append(tn->telnet_vars, option_arg);
+ if(!beg) {
+@@ -844,6 +857,9 @@ static CURLcode check_telnet_options(str
+ if(sscanf(head->data, "%127[^= ]%*[ =]%255s",
+ option_keyword, option_arg) == 2) {
+
++ if(str_is_nonascii(option_arg))
++ continue;
++
+ /* Terminal type */
+ if(strcasecompare(option_keyword, "TTYPE")) {
+ strncpy(tn->subopt_ttype, option_arg, 31);
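The filter above rejects any option value containing a byte with the high bit set, which keeps raw telnet protocol bytes such as IAC (0xff) out of user-supplied TTYPE/NEW_ENV values. A standalone sketch of the same check:

    #include <stdio.h>

    static int is_nonascii(const char *str)
    {
      for(; *str; str++)
        if(*str & 0x80)
          return 1;
      return 0;
    }

    int main(void)
    {
      printf("%d\n", is_nonascii("vt100"));          /* 0: plain ASCII  */
      printf("%d\n", is_nonascii("vt100\xff\xf0"));  /* 1: contains IAC */
      return 0;
    }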
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27534-pre1.patch b/meta/recipes-support/curl/curl/CVE-2023-27534-pre1.patch
new file mode 100644
index 0000000000..46c57afb73
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27534-pre1.patch
@@ -0,0 +1,51 @@
+From 6c51adeb71da076c5c40a45e339e06bb4394a86b Mon Sep 17 00:00:00 2001
+From: Eric Vigeant <evigeant@gmail.com>
+Date: Wed, 2 Nov 2022 11:47:09 -0400
+Subject: [PATCH] cur_path: do not add '/' if homedir ends with one
+
+When using SFTP and a path relative to the user home, do not add a
+trailing '/' to the user home dir if it already ends with one.
+
+Closes #9844
+
+CVE: CVE-2023-27534
+Note:
+- The upstream patch for CVE-2023-27534 does three things:
+1) creates the new path with dynbuf (a dynamic buffer)
+2) fixes the tilde handling error that causes CVE-2023-27534
+3) modifies the functionality added below so that a trailing "/" is not added to the user home dir if it already ends with one, using dynbuf.
+- The dynbuf functionality was added to curl in later versions and is not essential to fix the vulnerability.
+- This patch implements the third part of the upstream fix without using dynbuf.
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/6c51adeb71da076c5c40a45e339e06bb4394a86b]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ lib/curl_path.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/lib/curl_path.c b/lib/curl_path.c
+index f429634..40b92ee 100644
+--- a/lib/curl_path.c
++++ b/lib/curl_path.c
+@@ -70,10 +70,14 @@ CURLcode Curl_getworkingpath(struct connectdata *conn,
+ /* It is referenced to the home directory, so strip the
+ leading '/' */
+ memcpy(real_path, homedir, homelen);
+- real_path[homelen] = '/';
+- real_path[homelen + 1] = '\0';
++ /* Only add a trailing '/' if homedir does not end with one */
++ if(homelen == 0 || real_path[homelen - 1] != '/') {
++ real_path[homelen] = '/';
++ homelen++;
++ real_path[homelen] = '\0';
++ }
+ if(working_path_len > 3) {
+- memcpy(real_path + homelen + 1, working_path + 3,
++ memcpy(real_path + homelen, working_path + 3,
+ 1 + working_path_len -3);
+ }
+ }
+--
+2.24.4
+
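The change amounts to a plain path-joining rule: append a '/' to the home directory only when it does not already end with one. A standalone sketch of the rule (the helper name is made up):

    #include <stdio.h>
    #include <string.h>

    /* append "/" only when 'home' does not already end with one */
    static void join_home(char *out, size_t outsz,
                          const char *home, const char *rest)
    {
      size_t hlen = strlen(home);
      int need_slash = (hlen == 0) || (home[hlen - 1] != '/');
      snprintf(out, outsz, "%s%s%s", home, need_slash ? "/" : "", rest);
    }

    int main(void)
    {
      char buf[256];
      join_home(buf, sizeof(buf), "/home/user", "file");
      puts(buf);                     /* /home/user/file */
      join_home(buf, sizeof(buf), "/home/user/", "file");
      puts(buf);                     /* /home/user/file, no doubled slash */
      return 0;
    }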
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27534.patch b/meta/recipes-support/curl/curl/CVE-2023-27534.patch
new file mode 100644
index 0000000000..3ecd181290
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27534.patch
@@ -0,0 +1,33 @@
+From 4e2b52b5f7a3bf50a0f1494155717b02cc1df6d6 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Mar 2023 16:22:11 +0100
+Subject: [PATCH] curl_path: create the new path with dynbuf
+
+Closes #10729
+
+CVE: CVE-2023-27534
+Note: This patch is needed to backport CVE-2023-27534
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/4e2b52b5f7a3bf50a0f1494155717b02cc1df6d6]
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
+---
+ lib/curl_path.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/curl_path.c b/lib/curl_path.c
+index 40b92ee..598c5dd 100644
+--- a/lib/curl_path.c
++++ b/lib/curl_path.c
+@@ -60,7 +60,7 @@ CURLcode Curl_getworkingpath(struct connectdata *conn,
+ memcpy(real_path, working_path, 1 + working_path_len);
+ }
+ else if(conn->handler->protocol & CURLPROTO_SFTP) {
+- if((working_path_len > 1) && (working_path[1] == '~')) {
++ if((working_path_len > 2) && !memcmp(working_path, "/~/", 3)) {
+ size_t homelen = strlen(homedir);
+ real_path = malloc(homelen + working_path_len + 1);
+ if(real_path == NULL) {
+--
+2.24.4
+
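The stricter test only treats a path as home-relative when it starts with exactly "/~/", instead of accepting any path whose second byte is '~'. A standalone sketch:

    #include <stdio.h>
    #include <string.h>

    /* home-relative only when the path starts with exactly "/~/" */
    static int is_home_relative(const char *path)
    {
      return (strlen(path) > 2) && !memcmp(path, "/~/", 3);
    }

    int main(void)
    {
      printf("%d\n", is_home_relative("/~/file"));   /* 1: relative to $HOME */
      printf("%d\n", is_home_relative("/~user/f"));  /* 0: taken literally   */
      return 0;
    }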
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch b/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch
new file mode 100644
index 0000000000..034b72f7e6
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27535-pre1.patch
@@ -0,0 +1,236 @@
+From ed5095ed94281989e103c72e032200b83be37878 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 6 Oct 2022 00:49:10 +0200
+Subject: [PATCH] strcase: add and use Curl_timestrcmp
+
+This is a strcmp() alternative function for comparing "secrets",
+designed to take the same time no matter the content to not leak
+match/non-match info to observers based on how fast it is.
+
+The time this function takes is only a function of the shortest input
+string.
+
+Reported-by: Trail of Bits
+
+Closes #9658
+
+Upstream-Status: Backport from [https://github.com/curl/curl/commit/ed5095ed94281989e103c72e032200b83be37878 & https://github.com/curl/curl/commit/f18af4f874cecab82a9797e8c7541e0990c7a64c]
+Comment: To backport the fix for CVE-2023-27535, add the function Curl_timestrcmp.
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/netrc.c | 6 +++---
+ lib/strcase.c | 22 ++++++++++++++++++++++
+ lib/strcase.h | 1 +
+ lib/url.c | 33 +++++++++++++--------------------
+ lib/vauth/digest_sspi.c | 4 ++--
+ lib/vtls/vtls.c | 21 ++++++++++++++++++++-
+ 6 files changed, 61 insertions(+), 26 deletions(-)
+
+diff --git a/lib/netrc.c b/lib/netrc.c
+index 9323913..fe3fd1e 100644
+--- a/lib/netrc.c
++++ b/lib/netrc.c
+@@ -124,9 +124,9 @@ static int parsenetrc(const char *host,
+ /* we are now parsing sub-keywords concerning "our" host */
+ if(state_login) {
+ if(specific_login) {
+- state_our_login = strcasecompare(login, tok);
++ state_our_login = !Curl_timestrcmp(login, tok);
+ }
+- else if(!login || strcmp(login, tok)) {
++ else if(!login || Curl_timestrcmp(login, tok)) {
+ if(login_alloc) {
+ free(login);
+ login_alloc = FALSE;
+@@ -142,7 +142,7 @@ static int parsenetrc(const char *host,
+ }
+ else if(state_password) {
+ if((state_our_login || !specific_login)
+- && (!password || strcmp(password, tok))) {
++ && (!password || Curl_timestrcmp(password, tok))) {
+ if(password_alloc) {
+ free(password);
+ password_alloc = FALSE;
+diff --git a/lib/strcase.c b/lib/strcase.c
+index 70bf21c..ec776b3 100644
+--- a/lib/strcase.c
++++ b/lib/strcase.c
+@@ -261,6 +261,28 @@ bool Curl_safecmp(char *a, char *b)
+ return !a && !b;
+ }
+
++/*
++ * Curl_timestrcmp() returns 0 if the two strings are identical. The time this
++ * function spends is a function of the shortest string, not of the contents.
++ */
++int Curl_timestrcmp(const char *a, const char *b)
++{
++ int match = 0;
++ int i = 0;
++
++ if(a && b) {
++ while(1) {
++ match |= a[i]^b[i];
++ if(!a[i] || !b[i])
++ break;
++ i++;
++ }
++ }
++ else
++ return a || b;
++ return match;
++}
++
+ /* --- public functions --- */
+
+ int curl_strequal(const char *first, const char *second)
+diff --git a/lib/strcase.h b/lib/strcase.h
+index 8929a53..8077108 100644
+--- a/lib/strcase.h
++++ b/lib/strcase.h
+@@ -49,5 +49,6 @@ void Curl_strntoupper(char *dest, const char *src, size_t n);
+ void Curl_strntolower(char *dest, const char *src, size_t n);
+
+ bool Curl_safecmp(char *a, char *b);
++int Curl_timestrcmp(const char *first, const char *second);
+
+ #endif /* HEADER_CURL_STRCASE_H */
+diff --git a/lib/url.c b/lib/url.c
+index 9f14a7b..dfbde3b 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -886,19 +886,10 @@ socks_proxy_info_matches(const struct proxy_info* data,
+ /* the user information is case-sensitive
+ or at least it is not defined as case-insensitive
+ see https://tools.ietf.org/html/rfc3986#section-3.2.1 */
+- if((data->user == NULL) != (needle->user == NULL))
+- return FALSE;
+- /* curl_strequal does a case insentive comparison, so do not use it here! */
+- if(data->user &&
+- needle->user &&
+- strcmp(data->user, needle->user) != 0)
+- return FALSE;
+- if((data->passwd == NULL) != (needle->passwd == NULL))
+- return FALSE;
++
+ /* curl_strequal does a case insentive comparison, so do not use it here! */
+- if(data->passwd &&
+- needle->passwd &&
+- strcmp(data->passwd, needle->passwd) != 0)
++ if(Curl_timestrcmp(data->user, needle->user) ||
++ Curl_timestrcmp(data->passwd, needle->passwd))
+ return FALSE;
+ return TRUE;
+ }
+@@ -1257,10 +1248,10 @@ ConnectionExists(struct Curl_easy *data,
+ if(!(needle->handler->flags & PROTOPT_CREDSPERREQUEST)) {
+ /* This protocol requires credentials per connection,
+ so verify that we're using the same name and password as well */
+- if(strcmp(needle->user, check->user) ||
+- strcmp(needle->passwd, check->passwd) ||
+- !Curl_safecmp(needle->sasl_authzid, check->sasl_authzid) ||
+- !Curl_safecmp(needle->oauth_bearer, check->oauth_bearer)) {
++ if(Curl_timestrcmp(needle->user, check->user) ||
++ Curl_timestrcmp(needle->passwd, check->passwd) ||
++ Curl_timestrcmp(needle->sasl_authzid, check->sasl_authzid) ||
++ Curl_timestrcmp(needle->oauth_bearer, check->oauth_bearer)) {
+ /* one of them was different */
+ continue;
+ }
+@@ -1326,8 +1317,8 @@ ConnectionExists(struct Curl_easy *data,
+ possible. (Especially we must not reuse the same connection if
+ partway through a handshake!) */
+ if(wantNTLMhttp) {
+- if(strcmp(needle->user, check->user) ||
+- strcmp(needle->passwd, check->passwd)) {
++ if(Curl_timestrcmp(needle->user, check->user) ||
++ Curl_timestrcmp(needle->passwd, check->passwd)) {
+
+ /* we prefer a credential match, but this is at least a connection
+ that can be reused and "upgraded" to NTLM */
+@@ -1348,8 +1339,10 @@ ConnectionExists(struct Curl_easy *data,
+ if(!check->http_proxy.user || !check->http_proxy.passwd)
+ continue;
+
+- if(strcmp(needle->http_proxy.user, check->http_proxy.user) ||
+- strcmp(needle->http_proxy.passwd, check->http_proxy.passwd))
++ if(Curl_timestrcmp(needle->http_proxy.user,
++ check->http_proxy.user) ||
++ Curl_timestrcmp(needle->http_proxy.passwd,
++ check->http_proxy.passwd))
+ continue;
+ }
+ else if(check->proxy_ntlm_state != NTLMSTATE_NONE) {
+diff --git a/lib/vauth/digest_sspi.c b/lib/vauth/digest_sspi.c
+index a109056..3986386 100644
+--- a/lib/vauth/digest_sspi.c
++++ b/lib/vauth/digest_sspi.c
+@@ -450,8 +450,8 @@ CURLcode Curl_auth_create_digest_http_message(struct Curl_easy *data,
+ has changed then delete that context. */
+ if((userp && !digest->user) || (!userp && digest->user) ||
+ (passwdp && !digest->passwd) || (!passwdp && digest->passwd) ||
+- (userp && digest->user && strcmp(userp, digest->user)) ||
+- (passwdp && digest->passwd && strcmp(passwdp, digest->passwd))) {
++ (userp && digest->user && Curl_timestrcmp(userp, digest->user)) ||
++ (passwdp && digest->passwd && Curl_timestrcmp(passwdp, digest->passwd))) {
+ if(digest->http_context) {
+ s_pSecFn->DeleteSecurityContext(digest->http_context);
+ Curl_safefree(digest->http_context);
+diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c
+index e8cb70f..70a9391 100644
+--- a/lib/vtls/vtls.c
++++ b/lib/vtls/vtls.c
+@@ -98,9 +98,15 @@ Curl_ssl_config_matches(struct ssl_primary_config* data,
+ Curl_safecmp(data->issuercert, needle->issuercert) &&
+ Curl_safecmp(data->clientcert, needle->clientcert) &&
+ Curl_safecmp(data->random_file, needle->random_file) &&
+- Curl_safecmp(data->egdsocket, needle->egdsocket) &&
++ Curl_safecmp(data->egdsocket, needle->egdsocket) &&
++#ifdef USE_TLS_SRP
++ !Curl_timestrcmp(data->username, needle->username) &&
++ !Curl_timestrcmp(data->password, needle->password) &&
++ (data->authtype == needle->authtype) &&
++#endif
+ Curl_safe_strcasecompare(data->cipher_list, needle->cipher_list) &&
+ Curl_safe_strcasecompare(data->cipher_list13, needle->cipher_list13) &&
++ Curl_safe_strcasecompare(data->CRLfile, needle->CRLfile) &&
+ Curl_safe_strcasecompare(data->pinned_key, needle->pinned_key))
+ return TRUE;
+
+@@ -117,6 +123,9 @@ Curl_clone_primary_ssl_config(struct ssl_primary_config *source,
+ dest->verifyhost = source->verifyhost;
+ dest->verifystatus = source->verifystatus;
+ dest->sessionid = source->sessionid;
++#ifdef USE_TLS_SRP
++ dest->authtype = source->authtype;
++#endif
+
+ CLONE_STRING(CApath);
+ CLONE_STRING(CAfile);
+@@ -127,6 +136,11 @@ Curl_clone_primary_ssl_config(struct ssl_primary_config *source,
+ CLONE_STRING(cipher_list);
+ CLONE_STRING(cipher_list13);
+ CLONE_STRING(pinned_key);
++ CLONE_STRING(CRLfile);
++#ifdef USE_TLS_SRP
++ CLONE_STRING(username);
++ CLONE_STRING(password);
++#endif
+
+ return TRUE;
+ }
+@@ -142,6 +156,11 @@ void Curl_free_primary_ssl_config(struct ssl_primary_config* sslc)
+ Curl_safefree(sslc->cipher_list);
+ Curl_safefree(sslc->cipher_list13);
+ Curl_safefree(sslc->pinned_key);
++ Curl_safefree(sslc->CRLfile);
++#ifdef USE_TLS_SRP
++ Curl_safefree(sslc->username);
++ Curl_safefree(sslc->password);
++#endif
+ }
+
+ #ifdef USE_SSL
+--
+2.25.1
+
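The comparison semantics of the new helper are worth spelling out: 0 means the strings are identical, non-zero means they differ, a NULL versus non-NULL pair counts as different, and the running time depends only on the shorter string, so timing does not reveal where the first mismatching byte is. A standalone copy of the same logic with a few calls:

    #include <stdio.h>

    /* same shape as the Curl_timestrcmp() added by the patch */
    static int timestrcmp(const char *a, const char *b)
    {
      int match = 0;
      int i = 0;
      if(a && b) {
        while(1) {
          match |= a[i] ^ b[i];
          if(!a[i] || !b[i])
            break;
          i++;
        }
      }
      else
        return a || b;
      return match;
    }

    int main(void)
    {
      printf("%d\n", timestrcmp("secret", "secret") != 0); /* 0: same      */
      printf("%d\n", timestrcmp("secret", "Secret") != 0); /* 1: different */
      printf("%d\n", timestrcmp("secret", NULL) != 0);     /* 1: different */
      return 0;
    }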
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27535.patch b/meta/recipes-support/curl/curl/CVE-2023-27535.patch
new file mode 100644
index 0000000000..e38390a57c
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27535.patch
@@ -0,0 +1,170 @@
+From 8f4608468b890dce2dad9f91d5607ee7e9c1aba1 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 9 Mar 2023 17:47:06 +0100
+Subject: [PATCH] ftp: add more conditions for connection reuse
+
+Reported-by: Harry Sintonen
+Closes #10730
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/curl/tree/debian/patches/CVE-2023-27535.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/curl/curl/commit/8f4608468b890dce2dad9f91d5607ee7e9c1aba1]
+CVE: CVE-2023-27535
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/ftp.c | 30 ++++++++++++++++++++++++++++--
+ lib/ftp.h | 5 +++++
+ lib/setopt.c | 2 +-
+ lib/url.c | 16 +++++++++++++++-
+ lib/urldata.h | 4 ++--
+ 5 files changed, 51 insertions(+), 6 deletions(-)
+
+diff --git a/lib/ftp.c b/lib/ftp.c
+index 31a34e8..7a82a74 100644
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -4059,6 +4059,10 @@ static CURLcode ftp_disconnect(struct connectdata *conn, bool dead_connection)
+ }
+
+ freedirs(ftpc);
++ free(ftpc->account);
++ ftpc->account = NULL;
++ free(ftpc->alternative_to_user);
++ ftpc->alternative_to_user = NULL;
+ free(ftpc->prevpath);
+ ftpc->prevpath = NULL;
+ free(ftpc->server_os);
+@@ -4326,11 +4330,31 @@ static CURLcode ftp_setup_connection(struct connectdata *conn)
+ struct Curl_easy *data = conn->data;
+ char *type;
+ struct FTP *ftp;
++ struct ftp_conn *ftpc = &conn->proto.ftpc;
+
+- conn->data->req.protop = ftp = calloc(sizeof(struct FTP), 1);
++ ftp = calloc(sizeof(struct FTP), 1);
+ if(NULL == ftp)
+ return CURLE_OUT_OF_MEMORY;
+
++ /* clone connection related data that is FTP specific */
++ if(data->set.str[STRING_FTP_ACCOUNT]) {
++ ftpc->account = strdup(data->set.str[STRING_FTP_ACCOUNT]);
++ if(!ftpc->account) {
++ free(ftp);
++ return CURLE_OUT_OF_MEMORY;
++ }
++ }
++ if(data->set.str[STRING_FTP_ALTERNATIVE_TO_USER]) {
++ ftpc->alternative_to_user =
++ strdup(data->set.str[STRING_FTP_ALTERNATIVE_TO_USER]);
++ if(!ftpc->alternative_to_user) {
++ Curl_safefree(ftpc->account);
++ free(ftp);
++ return CURLE_OUT_OF_MEMORY;
++ }
++ }
++ conn->data->req.protop = ftp;
++
+ ftp->path = &data->state.up.path[1]; /* don't include the initial slash */
+
+ /* FTP URLs support an extension like ";type=<typecode>" that
+@@ -4366,7 +4390,9 @@ static CURLcode ftp_setup_connection(struct connectdata *conn)
+ /* get some initial data into the ftp struct */
+ ftp->transfer = FTPTRANSFER_BODY;
+ ftp->downloadsize = 0;
+- conn->proto.ftpc.known_filesize = -1; /* unknown size for now */
++ ftpc->known_filesize = -1; /* unknown size for now */
++ ftpc->use_ssl = data->set.use_ssl;
++ ftpc->ccc = data->set.ftp_ccc;
+
+ return CURLE_OK;
+ }
+diff --git a/lib/ftp.h b/lib/ftp.h
+index 984347f..163dcb3 100644
+--- a/lib/ftp.h
++++ b/lib/ftp.h
+@@ -116,6 +116,8 @@ struct FTP {
+ struct */
+ struct ftp_conn {
+ struct pingpong pp;
++ char *account;
++ char *alternative_to_user;
+ char *entrypath; /* the PWD reply when we logged on */
+ char **dirs; /* realloc()ed array for path components */
+ int dirdepth; /* number of entries used in the 'dirs' array */
+@@ -141,6 +143,9 @@ struct ftp_conn {
+ ftpstate state; /* always use ftp.c:state() to change state! */
+ ftpstate state_saved; /* transfer type saved to be reloaded after
+ data connection is established */
++ unsigned char use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
++ IMAP or POP3 or others! (type: curl_usessl)*/
++ unsigned char ccc; /* ccc level for this connection */
+ curl_off_t retr_size_saved; /* Size of retrieved file saved */
+ char *server_os; /* The target server operating system. */
+ curl_off_t known_filesize; /* file size is different from -1, if wildcard
+diff --git a/lib/setopt.c b/lib/setopt.c
+index 4d96f6b..a91bb70 100644
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -2126,7 +2126,7 @@ CURLcode Curl_vsetopt(struct Curl_easy *data, CURLoption option, va_list param)
+ arg = va_arg(param, long);
+ if((arg < CURLUSESSL_NONE) || (arg >= CURLUSESSL_LAST))
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+- data->set.use_ssl = (curl_usessl)arg;
++ data->set.use_ssl = (unsigned char)arg;
+ break;
+
+ case CURLOPT_SSL_OPTIONS:
+diff --git a/lib/url.c b/lib/url.c
+index dfbde3b..f84375c 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1257,10 +1257,24 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
+- if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_SSH) {
++#ifdef USE_SSH
++ else if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_SSH) {
+ if(!ssh_config_matches(needle, check))
+ continue;
+ }
++#endif
++#ifndef CURL_DISABLE_FTP
++ else if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_FTP) {
++ /* Also match ACCOUNT, ALTERNATIVE-TO-USER, USE_SSL and CCC options */
++ if(Curl_timestrcmp(needle->proto.ftpc.account,
++ check->proto.ftpc.account) ||
++ Curl_timestrcmp(needle->proto.ftpc.alternative_to_user,
++ check->proto.ftpc.alternative_to_user) ||
++ (needle->proto.ftpc.use_ssl != check->proto.ftpc.use_ssl) ||
++ (needle->proto.ftpc.ccc != check->proto.ftpc.ccc))
++ continue;
++ }
++#endif
+
+ if(!needle->bits.httpproxy || (needle->handler->flags&PROTOPT_SSL) ||
+ needle->bits.tunnel_proxy) {
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 168f874..51b793b 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1730,8 +1730,6 @@ struct UserDefined {
+ void *ssh_keyfunc_userp; /* custom pointer to callback */
+ enum CURL_NETRC_OPTION
+ use_netrc; /* defined in include/curl.h */
+- curl_usessl use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
+- IMAP or POP3 or others! */
+ long new_file_perms; /* Permissions to use when creating remote files */
+ long new_directory_perms; /* Permissions to use when creating remote dirs */
+ long ssh_auth_types; /* allowed SSH auth types */
+@@ -1851,6 +1849,8 @@ struct UserDefined {
+ BIT(http09_allowed); /* allow HTTP/0.9 responses */
+ BIT(mail_rcpt_allowfails); /* allow RCPT TO command to fail for some
+ recipients */
++ unsigned char use_ssl; /* if AUTH TLS is to be attempted etc, for FTP or
++ IMAP or POP3 or others! (type: curl_usessl)*/
+ };
+
+ struct Names {
+--
+2.25.1
+
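The general rule the patch enforces is that a cached connection may only be reused when every security-relevant per-connection option matches, not just the host and port. A generic sketch of such a reuse predicate (not curl code; "account" and "use_ssl" stand in for the FTP options the patch starts comparing):

    #include <string.h>

    struct conn_settings {
      const char *account;          /* may be NULL */
      unsigned char use_ssl;
    };

    static int same_or_both_null(const char *a, const char *b)
    {
      if(!a || !b)
        return a == b;
      return !strcmp(a, b);
    }

    static int can_reuse(const struct conn_settings *want,
                         const struct conn_settings *cached)
    {
      return same_or_both_null(want->account, cached->account) &&
             (want->use_ssl == cached->use_ssl);
    }

    int main(void)
    {
      struct conn_settings a = { "acct1", 1 };
      struct conn_settings b = { "acct2", 1 };
      return can_reuse(&a, &b);     /* 0: different account, no reuse */
    }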
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27536.patch b/meta/recipes-support/curl/curl/CVE-2023-27536.patch
new file mode 100644
index 0000000000..b04a77de25
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27536.patch
@@ -0,0 +1,55 @@
+From cb49e67303dbafbab1cebf4086e3ec15b7d56ee5 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 10 Mar 2023 09:22:43 +0100
+Subject: [PATCH] url: only reuse connections with same GSS delegation
+
+Reported-by: Harry Sintonen
+Closes #10731
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/cb49e67303dbafbab1cebf4086e3ec15b7d56ee5]
+CVE: CVE-2023-27536
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/url.c | 6 ++++++
+ lib/urldata.h | 1 +
+ 2 files changed, 7 insertions(+)
+
+diff --git a/lib/url.c b/lib/url.c
+index f84375c..87f4eb0 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1257,6 +1257,11 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
++ /* GSS delegation differences do not actually affect every connection
++ and auth method, but this check takes precaution before efficiency */
++ if(needle->gssapi_delegation != check->gssapi_delegation)
++ continue;
++
+ #ifdef USE_SSH
+ else if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_SSH) {
+ if(!ssh_config_matches(needle, check))
+@@ -1708,6 +1713,7 @@ static struct connectdata *allocate_conn(struct Curl_easy *data)
+ conn->fclosesocket = data->set.fclosesocket;
+ conn->closesocket_client = data->set.closesocket_client;
+ conn->lastused = Curl_now(); /* used now */
++ conn->gssapi_delegation = data->set.gssapi_delegation;
+
+ return conn;
+ error:
+diff --git a/lib/urldata.h b/lib/urldata.h
+index 51b793b..b8a611b 100644
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1118,6 +1118,7 @@ struct connectdata {
+ handle */
+ BIT(sock_accepted); /* TRUE if the SECONDARYSOCKET was created with
+ accept() */
++ long gssapi_delegation; /* inherited from set.gssapi_delegation */
+ };
+
+ /* The end of connectdata. */
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-27538.patch b/meta/recipes-support/curl/curl/CVE-2023-27538.patch
new file mode 100644
index 0000000000..6c40989d3b
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-27538.patch
@@ -0,0 +1,31 @@
+From af369db4d3833272b8ed443f7fcc2e757a0872eb Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Fri, 10 Mar 2023 08:22:51 +0100
+Subject: [PATCH] url: fix the SSH connection reuse check
+
+Reported-by: Harry Sintonen
+Closes #10735
+
+CVE: CVE-2023-27538
+Upstream-Status: Backport [https://github.com/curl/curl/commit/af369db4d3833272b8ed443f7fcc2e757a0872eb]
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ lib/url.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/url.c b/lib/url.c
+index 8da0245..9f14a7b 100644
+--- a/lib/url.c
++++ b/lib/url.c
+@@ -1266,7 +1266,7 @@ ConnectionExists(struct Curl_easy *data,
+ }
+ }
+
+- if(get_protocol_family(needle->handler->protocol) == PROTO_FAMILY_SSH) {
++ if(get_protocol_family(needle->handler->protocol) & PROTO_FAMILY_SSH) {
+ if(!ssh_config_matches(needle, check))
+ continue;
+ }
+--
+2.25.1
+
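The one-character change matters because the protocol family is a bit flag: membership has to be tested with a bitwise AND, since an equality test fails as soon as the value carries any other bit. A generic illustration (not curl code):

    #include <stdio.h>

    #define FAMILY_SSH  (1 << 0)
    #define FAMILY_FTP  (1 << 1)

    int main(void)
    {
      unsigned family = FAMILY_SSH | FAMILY_FTP;    /* more than one bit set */

      printf("equality test: %d\n", family == FAMILY_SSH);       /* 0: missed */
      printf("bitwise test:  %d\n", (family & FAMILY_SSH) != 0); /* 1: hit    */
      return 0;
    }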
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch b/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch
new file mode 100644
index 0000000000..eaa6fdc327
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28320-fol1.patch
@@ -0,0 +1,197 @@
+From f446258f0269a62289cca0210157cb8558d0edc3 Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 16 May 2023 23:40:42 +0200
+Subject: [PATCH] hostip: include easy_lock.h before using
+ GLOBAL_INIT_IS_THREADSAFE
+
+Since that header file is the only place that define can be defined.
+
+Reported-by: Marc Deslauriers
+
+Follow-up to 13718030ad4b3209
+
+Closes #11121
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/f446258f0269a62289cca0210157cb8558d0edc3]
+CVE: CVE-2023-28320
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/easy_lock.h | 109 ++++++++++++++++++++++++++++++++++++++++++++++++
+ lib/hostip.c | 10 ++---
+ lib/hostip.h | 9 ----
+ 3 files changed, 113 insertions(+), 15 deletions(-)
+ create mode 100644 lib/easy_lock.h
+
+diff --git a/lib/easy_lock.h b/lib/easy_lock.h
+new file mode 100644
+index 0000000..6399a39
+--- /dev/null
++++ b/lib/easy_lock.h
+@@ -0,0 +1,109 @@
++#ifndef HEADER_CURL_EASY_LOCK_H
++#define HEADER_CURL_EASY_LOCK_H
++/***************************************************************************
++ * _ _ ____ _
++ * Project ___| | | | _ \| |
++ * / __| | | | |_) | |
++ * | (__| |_| | _ <| |___
++ * \___|\___/|_| \_\_____|
++ *
++ * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
++ *
++ * This software is licensed as described in the file COPYING, which
++ * you should have received as part of this distribution. The terms
++ * are also available at https://curl.se/docs/copyright.html.
++ *
++ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
++ * copies of the Software, and permit persons to whom the Software is
++ * furnished to do so, under the terms of the COPYING file.
++ *
++ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
++ * KIND, either express or implied.
++ *
++ * SPDX-License-Identifier: curl
++ *
++ ***************************************************************************/
++
++#include "curl_setup.h"
++
++#define GLOBAL_INIT_IS_THREADSAFE
++
++#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
++
++#ifdef __MINGW32__
++#ifndef __MINGW64_VERSION_MAJOR
++#if (__MINGW32_MAJOR_VERSION < 5) || \
++ (__MINGW32_MAJOR_VERSION == 5 && __MINGW32_MINOR_VERSION == 0)
++/* mingw >= 5.0.1 defines SRWLOCK, and slightly different from MS define */
++typedef PVOID SRWLOCK, *PSRWLOCK;
++#endif
++#endif
++#ifndef SRWLOCK_INIT
++#define SRWLOCK_INIT NULL
++#endif
++#endif /* __MINGW32__ */
++
++#define curl_simple_lock SRWLOCK
++#define CURL_SIMPLE_LOCK_INIT SRWLOCK_INIT
++
++#define curl_simple_lock_lock(m) AcquireSRWLockExclusive(m)
++#define curl_simple_lock_unlock(m) ReleaseSRWLockExclusive(m)
++
++#elif defined(HAVE_ATOMIC) && defined(HAVE_STDATOMIC_H)
++#include <stdatomic.h>
++#if defined(HAVE_SCHED_YIELD)
++#include <sched.h>
++#endif
++
++#define curl_simple_lock atomic_int
++#define CURL_SIMPLE_LOCK_INIT 0
++
++/* a clang-thing */
++#ifndef __has_builtin
++#define __has_builtin(x) 0
++#endif
++
++#ifndef __INTEL_COMPILER
++/* The Intel compiler tries to look like GCC *and* clang *and* lies in its
++ __has_builtin() function, so override it. */
++
++/* if GCC on i386/x86_64 or if the built-in is present */
++#if ( (defined(__GNUC__) && !defined(__clang__)) && \
++ (defined(__i386__) || defined(__x86_64__))) || \
++ __has_builtin(__builtin_ia32_pause)
++#define HAVE_BUILTIN_IA32_PAUSE
++#endif
++
++#endif
++
++static inline void curl_simple_lock_lock(curl_simple_lock *lock)
++{
++ for(;;) {
++ if(!atomic_exchange_explicit(lock, true, memory_order_acquire))
++ break;
++ /* Reduce cache coherency traffic */
++ while(atomic_load_explicit(lock, memory_order_relaxed)) {
++ /* Reduce load (not mandatory) */
++#ifdef HAVE_BUILTIN_IA32_PAUSE
++ __builtin_ia32_pause();
++#elif defined(__aarch64__)
++ __asm__ volatile("yield" ::: "memory");
++#elif defined(HAVE_SCHED_YIELD)
++ sched_yield();
++#endif
++ }
++ }
++}
++
++static inline void curl_simple_lock_unlock(curl_simple_lock *lock)
++{
++ atomic_store_explicit(lock, false, memory_order_release);
++}
++
++#else
++
++#undef GLOBAL_INIT_IS_THREADSAFE
++
++#endif
++
++#endif /* HEADER_CURL_EASY_LOCK_H */
+diff --git a/lib/hostip.c b/lib/hostip.c
+index 5231a74..d5bf881 100644
+--- a/lib/hostip.c
++++ b/lib/hostip.c
+@@ -68,6 +68,8 @@
+ #include "curl_memory.h"
+ #include "memdebug.h"
+
++#include "easy_lock.h"
++
+ #if defined(CURLRES_SYNCH) && \
+ defined(HAVE_ALARM) && \
+ defined(SIGALRM) && \
+@@ -77,10 +79,6 @@
+ #define USE_ALARM_TIMEOUT
+ #endif
+
+-#ifdef USE_ALARM_TIMEOUT
+-#include "easy_lock.h"
+-#endif
+-
+ #define MAX_HOSTCACHE_LEN (255 + 7) /* max FQDN + colon + port number + zero */
+
+ /*
+@@ -259,8 +257,8 @@ void Curl_hostcache_prune(struct Curl_easy *data)
+ /* Beware this is a global and unique instance. This is used to store the
+ return address that we can jump back to from inside a signal handler. This
+ is not thread-safe stuff. */
+-sigjmp_buf curl_jmpenv;
+-curl_simple_lock curl_jmpenv_lock;
++static sigjmp_buf curl_jmpenv;
++static curl_simple_lock curl_jmpenv_lock;
+ #endif
+
+ /* lookup address, returns entry if found and not stale */
+diff --git a/lib/hostip.h b/lib/hostip.h
+index baf1e58..d7f73d9 100644
+--- a/lib/hostip.h
++++ b/lib/hostip.h
+@@ -196,15 +196,6 @@ Curl_cache_addr(struct Curl_easy *data, Curl_addrinfo *addr,
+ #define CURL_INADDR_NONE INADDR_NONE
+ #endif
+
+-#ifdef HAVE_SIGSETJMP
+-/* Forward-declaration of variable defined in hostip.c. Beware this
+- * is a global and unique instance. This is used to store the return
+- * address that we can jump back to from inside a signal handler.
+- * This is not thread-safe stuff.
+- */
+-extern sigjmp_buf curl_jmpenv;
+-#endif
+-
+ /*
+ * Function provided by the resolver backend to set DNS servers to use.
+ */
+--
+2.25.1
+
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28320.patch b/meta/recipes-support/curl/curl/CVE-2023-28320.patch
new file mode 100644
index 0000000000..0c9b67440a
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28320.patch
@@ -0,0 +1,86 @@
+From 13718030ad4b3209a7583b4f27f683cd3a6fa5f2 Mon Sep 17 00:00:00 2001
+From: Harry Sintonen <sintonen@iki.fi>
+Date: Tue, 25 Apr 2023 09:22:26 +0200
+Subject: [PATCH] hostip: add locks around use of global buffer for alarm()
+
+When building with the sync name resolver and timeout ability we now
+require thread-safety to be present to enable it.
+
+Closes #11030
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/13718030ad4b3209a7583b4f27f683cd3a6fa5f2]
+CVE: CVE-2023-28320
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/hostip.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/lib/hostip.c b/lib/hostip.c
+index f5bb634..5231a74 100644
+--- a/lib/hostip.c
++++ b/lib/hostip.c
+@@ -68,12 +68,19 @@
+ #include "curl_memory.h"
+ #include "memdebug.h"
+
+-#if defined(CURLRES_SYNCH) && \
+- defined(HAVE_ALARM) && defined(SIGALRM) && defined(HAVE_SIGSETJMP)
++#if defined(CURLRES_SYNCH) && \
++ defined(HAVE_ALARM) && \
++ defined(SIGALRM) && \
++ defined(HAVE_SIGSETJMP) && \
++ defined(GLOBAL_INIT_IS_THREADSAFE)
+ /* alarm-based timeouts can only be used with all the dependencies satisfied */
+ #define USE_ALARM_TIMEOUT
+ #endif
+
++#ifdef USE_ALARM_TIMEOUT
++#include "easy_lock.h"
++#endif
++
+ #define MAX_HOSTCACHE_LEN (255 + 7) /* max FQDN + colon + port number + zero */
+
+ /*
+@@ -248,11 +255,12 @@ void Curl_hostcache_prune(struct Curl_easy *data)
+ Curl_share_unlock(data, CURL_LOCK_DATA_DNS);
+ }
+
+-#ifdef HAVE_SIGSETJMP
++#ifdef USE_ALARM_TIMEOUT
+ /* Beware this is a global and unique instance. This is used to store the
+ return address that we can jump back to from inside a signal handler. This
+ is not thread-safe stuff. */
+ sigjmp_buf curl_jmpenv;
++curl_simple_lock curl_jmpenv_lock;
+ #endif
+
+ /* lookup address, returns entry if found and not stale */
+@@ -614,7 +622,6 @@ enum resolve_t Curl_resolv(struct connectdata *conn,
+ static
+ RETSIGTYPE alarmfunc(int sig)
+ {
+- /* this is for "-ansi -Wall -pedantic" to stop complaining! (rabe) */
+ (void)sig;
+ siglongjmp(curl_jmpenv, 1);
+ }
+@@ -695,6 +702,8 @@ enum resolve_t Curl_resolv_timeout(struct connectdata *conn,
+ This should be the last thing we do before calling Curl_resolv(),
+ as otherwise we'd have to worry about variables that get modified
+ before we invoke Curl_resolv() (and thus use "volatile"). */
++ curl_simple_lock_lock(&curl_jmpenv_lock);
++
+ if(sigsetjmp(curl_jmpenv, 1)) {
+ /* this is coming from a siglongjmp() after an alarm signal */
+ failf(data, "name lookup timed out");
+@@ -763,6 +772,8 @@ clean_up:
+ #endif
+ #endif /* HAVE_SIGACTION */
+
++ curl_simple_lock_unlock(&curl_jmpenv_lock);
++
+ /* switch back the alarm() to either zero or to what it was before minus
+ the time we spent until now! */
+ if(prev_alarm) {
+--
+2.25.1
+
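The process-wide sigjmp_buf used by the alarm-based timeout has to be serialized, otherwise two threads doing timed synchronous resolves overwrite each other's saved context. A generic pthread sketch (not curl code) of bracketing the sigsetjmp() window with a lock, similar in spirit to the curl_simple_lock calls added above:

    #include <pthread.h>
    #include <setjmp.h>

    static sigjmp_buf resolve_jmpenv;               /* one per process */
    static pthread_mutex_t resolve_lock = PTHREAD_MUTEX_INITIALIZER;

    static int timed_resolve(const char *host)
    {
      int timed_out;

      pthread_mutex_lock(&resolve_lock);            /* sole user of the buffer */
      timed_out = sigsetjmp(resolve_jmpenv, 1);
      if(!timed_out) {
        /* ... arm alarm(), run the blocking lookup of 'host' ... */
        (void)host;
      }
      pthread_mutex_unlock(&resolve_lock);
      return timed_out;
    }

    int main(void)
    {
      return timed_resolve("example.com");
    }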
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28321.patch b/meta/recipes-support/curl/curl/CVE-2023-28321.patch
new file mode 100644
index 0000000000..da1d1fdcd6
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28321.patch
@@ -0,0 +1,272 @@
+Upstream-Status: Backport [import from ubuntu curl_7.68.0-1ubuntu2.20 with
+minor change to the tests/data/test1397 part so the patch can be applied.
+upstream: https://github.com/curl/curl/commit/199f2d440d8659b42 ]
+CVE: CVE-2023-28321
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+This backport was obtained from SUSE.
+
+From 199f2d440d8659b42670c1b796220792b01a97bf Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Mon, 24 Apr 2023 21:07:02 +0200
+Subject: [PATCH] hostcheck: fix host name wildcard checking
+
+The leftmost "label" of the host name can now only match against single
+'*'. Like the browsers have worked for a long time.
+
+- extended unit test 1397 for this
+- move some SOURCE variables from unit/Makefile.am to unit/Makefile.inc
+
+Reported-by: Hiroki Kurosawa
+Closes #11018
+---
+ lib/hostcheck.c | 50 +++++++--------
+ tests/data/test1397 | 10 ++-
+ tests/unit/Makefile.am | 94 ----------------------------
+ tests/unit/Makefile.inc | 94 ++++++++++++++++++++++++++++
+ tests/unit/unit1397.c | 134 ++++++++++++++++++++++++----------------
+ 5 files changed, 202 insertions(+), 180 deletions(-)
+
+--- a/lib/hostcheck.c
++++ b/lib/hostcheck.c
+@@ -58,15 +58,19 @@
+ * apparent distinction between a name and an IP. We need to detect the use of
+ * an IP address and not wildcard match on such names.
+ *
++ * Only match on "*" being used for the leftmost label, not "a*", "a*b" nor
++ * "*b".
++ *
++ * @unittest: 1397
++ *
+ * NOTE: hostmatch() gets called with copied buffers so that it can modify the
+ * contents at will.
+ */
+
+ static int hostmatch(char *hostname, char *pattern)
+ {
+- const char *pattern_label_end, *pattern_wildcard, *hostname_label_end;
+- int wildcard_enabled;
+- size_t prefixlen, suffixlen;
++ const char *pattern_label_end, *hostname_label_end;
++ size_t suffixlen;
+ struct in_addr ignored;
+ #ifdef ENABLE_IPV6
+ struct sockaddr_in6 si6;
+@@ -80,13 +84,12 @@ static int hostmatch(char *hostname, cha
+ if(pattern[len-1]=='.')
+ pattern[len-1] = 0;
+
+- pattern_wildcard = strchr(pattern, '*');
+- if(pattern_wildcard == NULL)
++ if(strncmp(pattern, "*.", 2))
+ return strcasecompare(pattern, hostname) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+
+ /* detect IP address as hostname and fail the match if so */
+- if(Curl_inet_pton(AF_INET, hostname, &ignored) > 0)
++ else if(Curl_inet_pton(AF_INET, hostname, &ignored) > 0)
+ return CURL_HOST_NOMATCH;
+ #ifdef ENABLE_IPV6
+ if(Curl_inet_pton(AF_INET6, hostname, &si6.sin6_addr) > 0)
+@@ -95,14 +98,9 @@ static int hostmatch(char *hostname, cha
+
+ /* We require at least 2 dots in pattern to avoid too wide wildcard
+ match. */
+- wildcard_enabled = 1;
+ pattern_label_end = strchr(pattern, '.');
+- if(pattern_label_end == NULL || strchr(pattern_label_end + 1, '.') == NULL ||
+- pattern_wildcard > pattern_label_end ||
+- strncasecompare(pattern, "xn--", 4)) {
+- wildcard_enabled = 0;
+- }
+- if(!wildcard_enabled)
++ if(pattern_label_end == NULL ||
++ strchr(pattern_label_end + 1, '.') == NULL)
+ return strcasecompare(pattern, hostname) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+
+@@ -117,11 +115,9 @@ static int hostmatch(char *hostname, cha
+ if(hostname_label_end - hostname < pattern_label_end - pattern)
+ return CURL_HOST_NOMATCH;
+
+- prefixlen = pattern_wildcard - pattern;
+- suffixlen = pattern_label_end - (pattern_wildcard + 1);
+- return strncasecompare(pattern, hostname, prefixlen) &&
+- strncasecompare(pattern_wildcard + 1, hostname_label_end - suffixlen,
+- suffixlen) ?
++ suffixlen = pattern_label_end - (pattern + 1);
++ return strncasecompare(pattern + 1, hostname_label_end - suffixlen,
++ suffixlen) ?
+ CURL_HOST_MATCH : CURL_HOST_NOMATCH;
+ }
+
+--- a/tests/data/test1397
++++ b/tests/data/test1397
+@@ -2,8 +2,7 @@
+ <info>
+ <keywords>
+ unittest
+-ssl
+-wildcard
++Curl_cert_hostcheck
+ </keywords>
+ </info>
+
+@@ -16,9 +15,8 @@ none
+ <features>
+ unittest
+ </features>
+- <name>
+-Check wildcard certificate matching function Curl_cert_hostcheck
+- </name>
++<name>
++Curl_cert_hostcheck unit tests
++</name>
+ </client>
+-
+ </testcase>
+--- a/tests/unit/unit1397.c
++++ b/tests/unit/unit1397.c
+@@ -21,8 +21,6 @@
+ ***************************************************************************/
+ #include "curlcheck.h"
+
+-#include "hostcheck.h" /* from the lib dir */
+-
+ static CURLcode unit_setup(void)
+ {
+ return CURLE_OK;
+@@ -30,50 +28,94 @@ static CURLcode unit_setup(void)
+
+ static void unit_stop(void)
+ {
+- /* done before shutting down and exiting */
+ }
+
+-UNITTEST_START
++/* only these backends define the tested functions */
++#if defined(USE_OPENSSL) || defined(USE_GSKIT) || \
++ defined(USE_SCHANNEL)
++#include "hostcheck.h"
++struct testcase {
++ const char *host;
++ const char *pattern;
++ bool match;
++};
++
++static struct testcase tests[] = {
++ {"", "", FALSE},
++ {"a", "", FALSE},
++ {"", "b", FALSE},
++ {"a", "b", FALSE},
++ {"aa", "bb", FALSE},
++ {"\xff", "\xff", TRUE},
++ {"aa.aa.aa", "aa.aa.bb", FALSE},
++ {"aa.aa.aa", "aa.aa.aa", TRUE},
++ {"aa.aa.aa", "*.aa.bb", FALSE},
++ {"aa.aa.aa", "*.aa.aa", TRUE},
++ {"192.168.0.1", "192.168.0.1", TRUE},
++ {"192.168.0.1", "*.168.0.1", FALSE},
++ {"192.168.0.1", "*.0.1", FALSE},
++ {"h.ello", "*.ello", FALSE},
++ {"h.ello.", "*.ello", FALSE},
++ {"h.ello", "*.ello.", FALSE},
++ {"h.e.llo", "*.e.llo", TRUE},
++ {"h.e.llo", " *.e.llo", FALSE},
++ {" h.e.llo", "*.e.llo", TRUE},
++ {"h.e.llo.", "*.e.llo", TRUE},
++ {"*.e.llo.", "*.e.llo", TRUE},
++ {"************.e.llo.", "*.e.llo", TRUE},
++ {"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
++ "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
++ "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
++ "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
++ "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
++ ".e.llo.", "*.e.llo", TRUE},
++ {"\xfe\xfe.e.llo.", "*.e.llo", TRUE},
++ {"h.e.llo.", "*.e.llo.", TRUE},
++ {"h.e.llo", "*.e.llo.", TRUE},
++ {".h.e.llo", "*.e.llo.", FALSE},
++ {"h.e.llo", "*.*.llo.", FALSE},
++ {"h.e.llo", "h.*.llo", FALSE},
++ {"h.e.llo", "h.e.*", FALSE},
++ {"hello", "*.ello", FALSE},
++ {"hello", "**llo", FALSE},
++ {"bar.foo.example.com", "*.example.com", FALSE},
++ {"foo.example.com", "*.example.com", TRUE},
++ {"baz.example.net", "b*z.example.net", FALSE},
++ {"foobaz.example.net", "*baz.example.net", FALSE},
++ {"xn--l8j.example.local", "x*.example.local", FALSE},
++ {"xn--l8j.example.net", "*.example.net", TRUE},
++ {"xn--l8j.example.net", "*j.example.net", FALSE},
++ {"xn--l8j.example.net", "xn--l8j.example.net", TRUE},
++ {"xn--l8j.example.net", "xn--l8j.*.net", FALSE},
++ {"xl8j.example.net", "*.example.net", TRUE},
++ {"fe80::3285:a9ff:fe46:b619", "*::3285:a9ff:fe46:b619", FALSE},
++ {"fe80::3285:a9ff:fe46:b619", "fe80::3285:a9ff:fe46:b619", TRUE},
++ {NULL, NULL, FALSE}
++};
+
+-/* only these backends define the tested functions */
+-#if defined(USE_OPENSSL) || defined(USE_GSKIT)
++UNITTEST_START
++{
++ int i;
++ for(i = 0; tests[i].host; i++) {
++ if(tests[i].match != Curl_cert_hostcheck(tests[i].pattern,
++ tests[i].host)) {
++ fprintf(stderr,
++ "HOST: %s\n"
++ "PTRN: %s\n"
++ "did %sMATCH\n",
++ tests[i].host,
++ tests[i].pattern,
++ tests[i].match ? "NOT ": "");
++ unitfail++;
++ }
++ }
++}
+
+- /* here you start doing things and checking that the results are good */
++UNITTEST_STOP
++#else
+
+-fail_unless(Curl_cert_hostcheck("www.example.com", "www.example.com"),
+- "good 1");
+-fail_unless(Curl_cert_hostcheck("*.example.com", "www.example.com"),
+- "good 2");
+-fail_unless(Curl_cert_hostcheck("xxx*.example.com", "xxxwww.example.com"),
+- "good 3");
+-fail_unless(Curl_cert_hostcheck("f*.example.com", "foo.example.com"),
+- "good 4");
+-fail_unless(Curl_cert_hostcheck("192.168.0.0", "192.168.0.0"),
+- "good 5");
+-
+-fail_if(Curl_cert_hostcheck("xxx.example.com", "www.example.com"), "bad 1");
+-fail_if(Curl_cert_hostcheck("*", "www.example.com"), "bad 2");
+-fail_if(Curl_cert_hostcheck("*.*.com", "www.example.com"), "bad 3");
+-fail_if(Curl_cert_hostcheck("*.example.com", "baa.foo.example.com"), "bad 4");
+-fail_if(Curl_cert_hostcheck("f*.example.com", "baa.example.com"), "bad 5");
+-fail_if(Curl_cert_hostcheck("*.com", "example.com"), "bad 6");
+-fail_if(Curl_cert_hostcheck("*fail.com", "example.com"), "bad 7");
+-fail_if(Curl_cert_hostcheck("*.example.", "www.example."), "bad 8");
+-fail_if(Curl_cert_hostcheck("*.example.", "www.example"), "bad 9");
+-fail_if(Curl_cert_hostcheck("", "www"), "bad 10");
+-fail_if(Curl_cert_hostcheck("*", "www"), "bad 11");
+-fail_if(Curl_cert_hostcheck("*.168.0.0", "192.168.0.0"), "bad 12");
+-fail_if(Curl_cert_hostcheck("www.example.com", "192.168.0.0"), "bad 13");
+-
+-#ifdef ENABLE_IPV6
+-fail_if(Curl_cert_hostcheck("*::3285:a9ff:fe46:b619",
+- "fe80::3285:a9ff:fe46:b619"), "bad 14");
+-fail_unless(Curl_cert_hostcheck("fe80::3285:a9ff:fe46:b619",
+- "fe80::3285:a9ff:fe46:b619"), "good 6");
+-#endif
++UNITTEST_START
+
++UNITTEST_STOP
+ #endif
+
+- /* you end the test code like this: */
+-
+-UNITTEST_STOP
diff --git a/meta/recipes-support/curl/curl/CVE-2023-28322.patch b/meta/recipes-support/curl/curl/CVE-2023-28322.patch
new file mode 100644
index 0000000000..9351a2c286
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-28322.patch
@@ -0,0 +1,380 @@
+CVE: CVE-2023-28322
+Upstream-Status: Backport [ import patch from ubuntu curl_7.68.0-1ubuntu2.20
+upstream https://github.com/curl/curl/commit/7815647d6582c0a4900be2e1de ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+Backport of:
+
+From 7815647d6582c0a4900be2e1de6c5e61272c496b Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Tue, 25 Apr 2023 08:28:01 +0200
+Subject: [PATCH] lib: unify the upload/method handling
+
+By making sure we set state.upload based on the set.method value and not
+independently as set.upload, we reduce confusion and mixup risks, both
+internally and externally.
+
+Closes #11017
+---
+ lib/curl_rtmp.c | 4 ++--
+ lib/file.c | 4 ++--
+ lib/ftp.c | 8 ++++----
+ lib/http.c | 4 ++--
+ lib/imap.c | 6 +++---
+ lib/rtsp.c | 4 ++--
+ lib/setopt.c | 6 ++----
+ lib/smb.c | 6 +++---
+ lib/smtp.c | 4 ++--
+ lib/tftp.c | 8 ++++----
+ lib/transfer.c | 4 ++--
+ lib/urldata.h | 2 +-
+ lib/vssh/libssh.c | 6 +++---
+ lib/vssh/libssh2.c | 6 +++---
+ lib/vssh/wolfssh.c | 2 +-
+ 15 files changed, 36 insertions(+), 38 deletions(-)
+
+--- a/lib/curl_rtmp.c
++++ b/lib/curl_rtmp.c
+@@ -213,7 +213,7 @@ static CURLcode rtmp_connect(struct conn
+ /* We have to know if it's a write before we send the
+ * connect request packet
+ */
+- if(conn->data->set.upload)
++ if(conn->data->state.upload)
+ r->Link.protocol |= RTMP_FEATURE_WRITE;
+
+ /* For plain streams, use the buffer toggle trick to keep data flowing */
+@@ -245,7 +245,7 @@ static CURLcode rtmp_do(struct connectda
+ if(!RTMP_ConnectStream(r, 0))
+ return CURLE_FAILED_INIT;
+
+- if(conn->data->set.upload) {
++ if(conn->data->state.upload) {
+ Curl_pgrsSetUploadSize(data, data->state.infilesize);
+ Curl_setup_transfer(data, -1, -1, FALSE, FIRSTSOCKET);
+ }
+--- a/lib/file.c
++++ b/lib/file.c
+@@ -198,7 +198,7 @@ static CURLcode file_connect(struct conn
+ file->freepath = real_path; /* free this when done */
+
+ file->fd = fd;
+- if(!data->set.upload && (fd == -1)) {
++ if(!data->state.upload && (fd == -1)) {
+ failf(data, "Couldn't open file %s", data->state.up.path);
+ file_done(conn, CURLE_FILE_COULDNT_READ_FILE, FALSE);
+ return CURLE_FILE_COULDNT_READ_FILE;
+@@ -390,7 +390,7 @@ static CURLcode file_do(struct connectda
+
+ Curl_pgrsStartNow(data);
+
+- if(data->set.upload)
++ if(data->state.upload)
+ return file_upload(conn);
+
+ file = conn->data->req.protop;
+--- a/lib/ftp.c
++++ b/lib/ftp.c
+@@ -1371,7 +1371,7 @@ static CURLcode ftp_state_prepare_transf
+ data->set.str[STRING_CUSTOMREQUEST]:
+ (data->set.ftp_list_only?"NLST":"LIST"));
+ }
+- else if(data->set.upload) {
++ else if(data->state.upload) {
+ PPSENDF(&conn->proto.ftpc.pp, "PRET STOR %s", conn->proto.ftpc.file);
+ }
+ else {
+@@ -3303,7 +3303,7 @@ static CURLcode ftp_done(struct connectd
+ /* the response code from the transfer showed an error already so no
+ use checking further */
+ ;
+- else if(data->set.upload) {
++ else if(data->state.upload) {
+ if((-1 != data->state.infilesize) &&
+ (data->state.infilesize != data->req.writebytecount) &&
+ !data->set.crlf &&
+@@ -3570,7 +3570,7 @@ static CURLcode ftp_do_more(struct conne
+ connected back to us */
+ }
+ }
+- else if(data->set.upload) {
++ else if(data->state.upload) {
+ result = ftp_nb_type(conn, data->set.prefer_ascii, FTP_STOR_TYPE);
+ if(result)
+ return result;
+@@ -4209,7 +4209,7 @@ CURLcode ftp_parse_url_path(struct conne
+ ftpc->file = NULL; /* instead of point to a zero byte,
+ we make it a NULL pointer */
+
+- if(data->set.upload && !ftpc->file && (ftp->transfer == FTPTRANSFER_BODY)) {
++ if(data->state.upload && !ftpc->file && (ftp->transfer == FTPTRANSFER_BODY)) {
+ /* We need a file name when uploading. Return error! */
+ failf(data, "Uploading to a URL without a file name!");
+ free(rawPath);
+--- a/lib/http.c
++++ b/lib/http.c
+@@ -2080,7 +2080,7 @@ CURLcode Curl_http(struct connectdata *c
+ }
+
+ if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
+- data->set.upload) {
++ data->state.upload) {
+ httpreq = HTTPREQ_PUT;
+ }
+
+@@ -2261,7 +2261,7 @@ CURLcode Curl_http(struct connectdata *c
+ if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
+ (((httpreq == HTTPREQ_POST_MIME || httpreq == HTTPREQ_POST_FORM) &&
+ http->postsize < 0) ||
+- ((data->set.upload || httpreq == HTTPREQ_POST) &&
++ ((data->state.upload || httpreq == HTTPREQ_POST) &&
+ data->state.infilesize == -1))) {
+ if(conn->bits.authneg)
+ /* don't enable chunked during auth neg */
+--- a/lib/imap.c
++++ b/lib/imap.c
+@@ -1469,11 +1469,11 @@ static CURLcode imap_done(struct connect
+ result = status; /* use the already set error code */
+ }
+ else if(!data->set.connect_only && !imap->custom &&
+- (imap->uid || imap->mindex || data->set.upload ||
++ (imap->uid || imap->mindex || data->state.upload ||
+ data->set.mimepost.kind != MIMEKIND_NONE)) {
+ /* Handle responses after FETCH or APPEND transfer has finished */
+
+- if(!data->set.upload && data->set.mimepost.kind == MIMEKIND_NONE)
++ if(!data->state.upload && data->set.mimepost.kind == MIMEKIND_NONE)
+ state(conn, IMAP_FETCH_FINAL);
+ else {
+ /* End the APPEND command first by sending an empty line */
+@@ -1539,7 +1539,7 @@ static CURLcode imap_perform(struct conn
+ selected = TRUE;
+
+ /* Start the first command in the DO phase */
+- if(conn->data->set.upload || data->set.mimepost.kind != MIMEKIND_NONE)
++ if(conn->data->state.upload || data->set.mimepost.kind != MIMEKIND_NONE)
+ /* APPEND can be executed directly */
+ result = imap_perform_append(conn);
+ else if(imap->custom && (selected || !imap->mailbox))
+--- a/lib/rtsp.c
++++ b/lib/rtsp.c
+@@ -499,7 +499,7 @@ static CURLcode rtsp_do(struct connectda
+ rtspreq == RTSPREQ_SET_PARAMETER ||
+ rtspreq == RTSPREQ_GET_PARAMETER) {
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ putsize = data->state.infilesize;
+ data->set.httpreq = HTTPREQ_PUT;
+
+@@ -518,7 +518,7 @@ static CURLcode rtsp_do(struct connectda
+ result =
+ Curl_add_bufferf(&req_buffer,
+ "Content-Length: %" CURL_FORMAT_CURL_OFF_T"\r\n",
+- (data->set.upload ? putsize : postsize));
++ (data->state.upload ? putsize : postsize));
+ if(result)
+ return result;
+ }
+--- a/lib/setopt.c
++++ b/lib/setopt.c
+@@ -258,8 +258,8 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ * We want to sent data to the remote host. If this is HTTP, that equals
+ * using the PUT request.
+ */
+- data->set.upload = (0 != va_arg(param, long)) ? TRUE : FALSE;
+- if(data->set.upload) {
++ arg = va_arg(param, long);
++ if(arg) {
+ /* If this is HTTP, PUT is what's needed to "upload" */
+ data->set.httpreq = HTTPREQ_PUT;
+ data->set.opt_no_body = FALSE; /* this is implied */
+@@ -486,7 +486,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ }
+ else
+ data->set.httpreq = HTTPREQ_GET;
+- data->set.upload = FALSE;
+ break;
+
+ case CURLOPT_COPYPOSTFIELDS:
+@@ -797,7 +796,6 @@ CURLcode Curl_vsetopt(struct Curl_easy *
+ */
+ if(va_arg(param, long)) {
+ data->set.httpreq = HTTPREQ_GET;
+- data->set.upload = FALSE; /* switch off upload */
+ data->set.opt_no_body = FALSE; /* this is implied */
+ }
+ break;
+--- a/lib/smb.c
++++ b/lib/smb.c
+@@ -516,7 +516,7 @@ static CURLcode smb_send_open(struct con
+ byte_count = strlen(req->path);
+ msg.name_length = smb_swap16((unsigned short)byte_count);
+ msg.share_access = smb_swap32(SMB_FILE_SHARE_ALL);
+- if(conn->data->set.upload) {
++ if(conn->data->state.upload) {
+ msg.access = smb_swap32(SMB_GENERIC_READ | SMB_GENERIC_WRITE);
+ msg.create_disposition = smb_swap32(SMB_FILE_OVERWRITE_IF);
+ }
+@@ -792,7 +792,7 @@ static CURLcode smb_request_state(struct
+ smb_m = (const struct smb_nt_create_response*) msg;
+ req->fid = smb_swap16(smb_m->fid);
+ conn->data->req.offset = 0;
+- if(conn->data->set.upload) {
++ if(conn->data->state.upload) {
+ conn->data->req.size = conn->data->state.infilesize;
+ Curl_pgrsSetUploadSize(conn->data, conn->data->req.size);
+ next_state = SMB_UPLOAD;
+--- a/lib/smtp.c
++++ b/lib/smtp.c
+@@ -1210,7 +1210,7 @@ static CURLcode smtp_done(struct connect
+ result = status; /* use the already set error code */
+ }
+ else if(!data->set.connect_only && data->set.mail_rcpt &&
+- (data->set.upload || data->set.mimepost.kind)) {
++ (data->state.upload || data->set.mimepost.kind)) {
+ /* Calculate the EOB taking into account any terminating CRLF from the
+ previous line of the email or the CRLF of the DATA command when there
+ is "no mail data". RFC-5321, sect. 4.1.1.4.
+@@ -1297,7 +1297,7 @@ static CURLcode smtp_perform(struct conn
+ smtp->eob = 2;
+
+ /* Start the first command in the DO phase */
+- if((data->set.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
++ if((data->state.upload || data->set.mimepost.kind) && data->set.mail_rcpt)
+ /* MAIL transfer */
+ result = smtp_perform_mail(conn);
+ else
+--- a/lib/tftp.c
++++ b/lib/tftp.c
+@@ -390,7 +390,7 @@ static CURLcode tftp_parse_option_ack(tf
+
+ /* tsize should be ignored on upload: Who cares about the size of the
+ remote file? */
+- if(!data->set.upload) {
++ if(!data->state.upload) {
+ if(!tsize) {
+ failf(data, "invalid tsize -:%s:- value in OACK packet", value);
+ return CURLE_TFTP_ILLEGAL;
+@@ -470,7 +470,7 @@ static CURLcode tftp_send_first(tftp_sta
+ return result;
+ }
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ /* If we are uploading, send an WRQ */
+ setpacketevent(&state->spacket, TFTP_EVENT_WRQ);
+ state->conn->data->req.upload_fromhere =
+@@ -505,7 +505,7 @@ static CURLcode tftp_send_first(tftp_sta
+ if(!data->set.tftp_no_options) {
+ char buf[64];
+ /* add tsize option */
+- if(data->set.upload && (data->state.infilesize != -1))
++ if(data->state.upload && (data->state.infilesize != -1))
+ msnprintf(buf, sizeof(buf), "%" CURL_FORMAT_CURL_OFF_T,
+ data->state.infilesize);
+ else
+@@ -559,7 +559,7 @@ static CURLcode tftp_send_first(tftp_sta
+ break;
+
+ case TFTP_EVENT_OACK:
+- if(data->set.upload) {
++ if(data->state.upload) {
+ result = tftp_connect_for_tx(state, event);
+ }
+ else {
+--- a/lib/transfer.c
++++ b/lib/transfer.c
+@@ -1405,6 +1405,7 @@ void Curl_init_CONNECT(struct Curl_easy
+ {
+ data->state.fread_func = data->set.fread_func_set;
+ data->state.in = data->set.in_set;
++ data->state.upload = (data->set.httpreq == HTTPREQ_PUT);
+ }
+
+ /*
+@@ -1816,7 +1817,7 @@ CURLcode Curl_retry_request(struct conne
+
+ /* if we're talking upload, we can't do the checks below, unless the protocol
+ is HTTP as when uploading over HTTP we will still get a response */
+- if(data->set.upload &&
++ if(data->state.upload &&
+ !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
+ return CURLE_OK;
+
+--- a/lib/urldata.h
++++ b/lib/urldata.h
+@@ -1427,6 +1427,7 @@ struct UrlState {
+ BIT(stream_depends_e); /* set or don't set the Exclusive bit */
+ BIT(previouslypending); /* this transfer WAS in the multi->pending queue */
+ BIT(cookie_engine);
++ BIT(upload); /* upload request */
+ };
+
+
+@@ -1762,7 +1763,6 @@ struct UserDefined {
+ BIT(http_auto_referer); /* set "correct" referer when following
+ location: */
+ BIT(opt_no_body); /* as set with CURLOPT_NOBODY */
+- BIT(upload); /* upload request */
+ BIT(verbose); /* output verbosity */
+ BIT(krb); /* Kerberos connection requested */
+ BIT(reuse_forbid); /* forbidden to be reused, close after use */
+--- a/lib/vssh/libssh.c
++++ b/lib/vssh/libssh.c
+@@ -1076,7 +1076,7 @@ static CURLcode myssh_statemach_act(stru
+ }
+
+ case SSH_SFTP_TRANS_INIT:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(conn, SSH_SFTP_UPLOAD_INIT);
+ else {
+ if(protop->path[strlen(protop->path)-1] == '/')
+@@ -1686,7 +1686,7 @@ static CURLcode myssh_statemach_act(stru
+ /* Functions from the SCP subsystem cannot handle/return SSH_AGAIN */
+ ssh_set_blocking(sshc->ssh_session, 1);
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ if(data->state.infilesize < 0) {
+ failf(data, "SCP requires a known file size for upload");
+ sshc->actualcode = CURLE_UPLOAD_FAILED;
+@@ -1787,7 +1787,7 @@ static CURLcode myssh_statemach_act(stru
+ break;
+ }
+ case SSH_SCP_DONE:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(conn, SSH_SCP_SEND_EOF);
+ else
+ state(conn, SSH_SCP_CHANNEL_FREE);
+--- a/lib/vssh/libssh2.c
++++ b/lib/vssh/libssh2.c
+@@ -1664,7 +1664,7 @@ static CURLcode ssh_statemach_act(struct
+ }
+
+ case SSH_SFTP_TRANS_INIT:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(conn, SSH_SFTP_UPLOAD_INIT);
+ else {
+ if(sftp_scp->path[strlen(sftp_scp->path)-1] == '/')
+@@ -2366,7 +2366,7 @@ static CURLcode ssh_statemach_act(struct
+ break;
+ }
+
+- if(data->set.upload) {
++ if(data->state.upload) {
+ if(data->state.infilesize < 0) {
+ failf(data, "SCP requires a known file size for upload");
+ sshc->actualcode = CURLE_UPLOAD_FAILED;
+@@ -2504,7 +2504,7 @@ static CURLcode ssh_statemach_act(struct
+ break;
+
+ case SSH_SCP_DONE:
+- if(data->set.upload)
++ if(data->state.upload)
+ state(conn, SSH_SCP_SEND_EOF);
+ else
+ state(conn, SSH_SCP_CHANNEL_FREE);
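
The commit message above moves the upload decision onto state.upload, derived from the configured method, so the flag can no longer be set independently of it. As a rough standalone C sketch of that idea (req_method, settings, xfer_state and init_transfer are hypothetical names, not curl's API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical request methods, loosely mirroring curl's HTTPREQ_* values. */
    enum req_method { REQ_GET, REQ_POST, REQ_PUT };

    struct settings   { enum req_method method; };  /* what the user configured   */
    struct xfer_state { bool upload; };             /* derived per-transfer state */

    /* Derive the upload flag from the method once, when the transfer starts,
     * so the two values can never disagree. */
    static void init_transfer(const struct settings *set, struct xfer_state *st)
    {
        st->upload = (set->method == REQ_PUT);
    }

    int main(void)
    {
        struct settings set = { REQ_PUT };
        struct xfer_state st;
        init_transfer(&set, &st);
        printf("upload=%d\n", st.upload);   /* prints upload=1 */
        return 0;
    }

Computing the flag in exactly one place when the transfer starts is what removes the internal/external mixup risk the message describes.
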
diff --git a/meta/recipes-support/curl/curl/CVE-2023-32001.patch b/meta/recipes-support/curl/curl/CVE-2023-32001.patch
new file mode 100644
index 0000000000..f533992bcd
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-32001.patch
@@ -0,0 +1,38 @@
+From 0c667188e0c6cda615a036b8a2b4125f2c404dde Mon Sep 17 00:00:00 2001
+From: SaltyMilk <soufiane.elmelcaoui@gmail.com>
+Date: Mon, 10 Jul 2023 21:43:28 +0200
+Subject: [PATCH] fopen: optimize
+
+Closes #11419
+
+Upstream-Status: Backport [https://github.com/curl/curl/commit/0c667188e0c6cda615a036b8a2b4125f2c404dde]
+CVE: CVE-2023-32001
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ lib/fopen.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/lib/fopen.c b/lib/fopen.c
+index c9c9e3d6e73a2..b6e3cadddef65 100644
+--- a/lib/fopen.c
++++ b/lib/fopen.c
+@@ -56,13 +56,13 @@ CURLcode Curl_fopen(struct Curl_easy *data, const char *filename,
+ int fd = -1;
+ *tempname = NULL;
+
+- if(stat(filename, &sb) == -1 || !S_ISREG(sb.st_mode)) {
+- /* a non-regular file, fallback to direct fopen() */
+- *fh = fopen(filename, FOPEN_WRITETEXT);
+- if(*fh)
+- return CURLE_OK;
++ *fh = fopen(filename, FOPEN_WRITETEXT);
++ if(!*fh)
+ goto fail;
+- }
++ if(fstat(fileno(*fh), &sb) == -1 || !S_ISREG(sb.st_mode))
++ return CURLE_OK;
++ fclose(*fh);
++ *fh = NULL;
+
+ result = Curl_rand_hex(data, randsuffix, sizeof(randsuffix));
+ if(result)
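
The change above replaces the stat()-then-fopen() sequence with fopen() followed by fstat() on the already-open stream, so the regular-file check and the subsequent writes refer to the same inode. A minimal sketch of that pattern under POSIX (open_regular is an illustrative helper, not curl's Curl_fopen):

    #include <stdio.h>
    #include <sys/stat.h>

    /* Open 'path' for writing and report whether it is a regular file.
     * fstat() on the already-open stream inspects the same inode that will
     * be written, unlike a separate stat(path) call. */
    static FILE *open_regular(const char *path, int *is_regular)
    {
        struct stat sb;
        FILE *fh = fopen(path, "w");
        if (!fh)
            return NULL;
        *is_regular = (fstat(fileno(fh), &sb) == 0) && S_ISREG(sb.st_mode);
        return fh;
    }

    int main(void)
    {
        int reg = 0;
        FILE *fh = open_regular("demo.txt", &reg);
        if (fh) {
            printf("regular=%d\n", reg);
            fclose(fh);
        }
        return 0;
    }

Checking the open descriptor rather than the path closes the window in which the file could be swapped between the check and the open.
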
diff --git a/meta/recipes-support/curl/curl/CVE-2023-38545.patch b/meta/recipes-support/curl/curl/CVE-2023-38545.patch
new file mode 100644
index 0000000000..c6b6726886
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-38545.patch
@@ -0,0 +1,148 @@
+From 600a1caeb2312fdee5ef1caf7d613c12a8b2424a Mon Sep 17 00:00:00 2001
+From: Mike Crowe <mac@mcrowe.com>
+Date: Wed, 11 Oct 2023 20:50:28 +0100
+Subject: [PATCH] socks: return error if hostname too long for remote resolve
+To: libcurl development <curl-library@cool.haxx.se>
+
+Prior to this change the state machine attempted to change the remote
+resolve to a local resolve if the hostname was longer than 255
+characters. Unfortunately that did not work as intended and caused a
+security issue.
+
+Name resolvers cannot resolve hostnames longer than 255 characters.
+
+Bug: https://curl.se/docs/CVE-2023-38545.html
+
+Unfortunately CURLE_PROXY and CURLPX_LONG_HOSTNAME were introduced in
+7.73.0 so they can't be used in 7.69.1. Let's use
+CURLE_COULDNT_RESOLVE_HOST as the best available alternative and update
+the test appropriately.
+
+libcurl's test support has been improved considerably since 7.69.1 which
+means that the test must be modified to remove use of %VERSION and
+%TESTNUMBER and the stderr output can no longer be checked.
+
+CVE: CVE-2023-38545
+Upstream-Status: Backport [fb4415d8aee6c1045be932a34fe6107c2f5ed147]
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
+---
+ lib/socks.c | 13 +++++----
+ tests/data/Makefile.inc | 2 +-
+ tests/data/test728 | 60 +++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 69 insertions(+), 6 deletions(-)
+ create mode 100644 tests/data/test728
+
+diff --git a/lib/socks.c b/lib/socks.c
+index 37099130e..f3bf40533 100644
+--- a/lib/socks.c
++++ b/lib/socks.c
+@@ -521,11 +521,14 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
+ infof(conn->data, "SOCKS5: connecting to HTTP proxy %s port %d\n",
+ hostname, remote_port);
+
+- /* RFC1928 chapter 5 specifies max 255 chars for domain name in packet */
++ /* RFC1928 chapter 5 specifies max 255 chars for domain name in packet. */
+ if(!socks5_resolve_local && hostname_len > 255) {
+- infof(conn->data, "SOCKS5: server resolving disabled for hostnames of "
+- "length > 255 [actual len=%zu]\n", hostname_len);
+- socks5_resolve_local = TRUE;
++ failf(data, "SOCKS5: the destination hostname is too long to be "
++ "resolved remotely by the proxy.");
++ /* This version of libcurl doesn't have CURLE_PROXY and
++ * therefore CURLPX_LONG_HOSTNAME, so let's report the best we
++ * can. */
++ return CURLE_COULDNT_RESOLVE_HOST;
+ }
+
+ if(auth & ~(CURLAUTH_BASIC | CURLAUTH_GSSAPI))
+@@ -837,7 +840,7 @@ CURLcode Curl_SOCKS5(const char *proxy_user,
+
+ if(!socks5_resolve_local) {
+ socksreq[len++] = 3; /* ATYP: domain name = 3 */
+- socksreq[len++] = (char) hostname_len; /* one byte address length */
++ socksreq[len++] = (unsigned char) hostname_len; /* one byte length */
+ memcpy(&socksreq[len], hostname, hostname_len); /* address w/o NULL */
+ len += hostname_len;
+ infof(data, "SOCKS5 connect to %s:%d (remotely resolved)\n",
+diff --git a/tests/data/Makefile.inc b/tests/data/Makefile.inc
+index 3d8565c36..5ee2284ff 100644
+--- a/tests/data/Makefile.inc
++++ b/tests/data/Makefile.inc
+@@ -89,7 +89,7 @@ test662 test663 test664 test665 test666 test667 test668 \
+ test670 test671 test672 test673 \
+ \
+ test700 test701 test702 test703 test704 test705 test706 test707 test708 \
+-test709 test710 test711 test712 test713 test714 test715 test716 test717 \
++test709 test710 test711 test712 test713 test714 test715 test716 test717 test728 \
+ \
+ test800 test801 test802 test803 test804 test805 test806 test807 test808 \
+ test809 test810 test811 test812 test813 test814 test815 test816 test817 \
+diff --git a/tests/data/test728 b/tests/data/test728
+new file mode 100644
+index 000000000..7b1d8b2f3
+--- /dev/null
++++ b/tests/data/test728
+@@ -0,0 +1,60 @@
++<testcase>
++<info>
++<keywords>
++HTTP
++HTTP GET
++SOCKS5
++SOCKS5h
++followlocation
++</keywords>
++</info>
++
++#
++# Server-side
++<reply>
++# The hostname in this redirect is 256 characters and too long (> 255) for
++# SOCKS5 remote resolve. curl must return error CURLE_PROXY in this case.
++<data>
++HTTP/1.1 301 Moved Permanently
++Location: http://AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/
++Content-Length: 0
++Connection: close
++
++</data>
++</reply>
++
++#
++# Client-side
++<client>
++<features>
++proxy
++</features>
++<server>
++http
++socks5
++</server>
++ <name>
++SOCKS5h with HTTP redirect to hostname too long
++ </name>
++ <command>
++--no-progress-meter --location --proxy socks5h://%HOSTIP:%SOCKSPORT http://%HOSTIP:%HTTPPORT/728
++</command>
++</client>
++
++#
++# Verify data after the test has been "shot"
++<verify>
++<strip>
++^User-Agent:.*
++</strip>
++<protocol>
++GET /728 HTTP/1.1
++Host: %HOSTIP:%HTTPPORT
++Accept: */*
++
++</protocol>
++<errorcode>
++6
++</errorcode>
++</verify>
++</testcase>
+--
+2.39.2
+
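
The guard above rejects hostnames that cannot fit the single length byte of a SOCKS5 ATYP=3 address instead of silently falling back to local resolving. A hedged sketch of the same bound check, independent of curl's request builder (socks5_append_domain is a made-up helper):

    #include <stdio.h>
    #include <string.h>

    /* Append a SOCKS5 ATYP=3 (domain name) address to 'req'.  RFC 1928
     * encodes the name length in a single byte, so anything longer than
     * 255 bytes must be rejected up front. */
    static int socks5_append_domain(unsigned char *req, size_t cap,
                                    const char *host)
    {
        size_t len = strlen(host);
        if (len > 255 || cap < len + 2)
            return -1;                     /* too long to resolve remotely */
        req[0] = 3;                        /* ATYP: domain name */
        req[1] = (unsigned char)len;       /* one-byte length   */
        memcpy(&req[2], host, len);
        return (int)(len + 2);
    }

    int main(void)
    {
        unsigned char req[300];
        printf("%d\n", socks5_append_domain(req, sizeof(req), "example.com"));
        return 0;
    }

Failing early means the one-byte length field can never silently truncate while the full hostname is still copied into the request buffer.
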
diff --git a/meta/recipes-support/curl/curl/CVE-2023-38546.patch b/meta/recipes-support/curl/curl/CVE-2023-38546.patch
new file mode 100644
index 0000000000..30ef2fd038
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-38546.patch
@@ -0,0 +1,132 @@
+From 7b67721f12cbe6ed1a41e7332f3b5a7186a5e23f Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 14 Sep 2023 23:28:32 +0200
+Subject: [PATCH] cookie: remove unnecessary struct fields
+To: libcurl development <curl-library@cool.haxx.se>
+
+Plus: reduce the hash table size from 256 to 63. It seems unlikely to
+make much of a speed difference for most use cases but saves 1.5KB of
+data per instance.
+
+Closes #11862
+
+This patch taken from Debian's 7.64.0-4+deb10u7 package which applied with
+only a little fuzz.
+
+CVE: CVE-2023-38546
+Upstream-Status: Backport [61275672b46d9abb32857404]
+Signed-off-by: Mike Crowe <mac@mcrowe.com>
+---
+ lib/cookie.c | 13 +------------
+ lib/cookie.h | 7 ++-----
+ lib/easy.c | 4 +---
+ 3 files changed, 4 insertions(+), 20 deletions(-)
+
+diff --git a/lib/cookie.c b/lib/cookie.c
+index 68054e1c4..a378f28e1 100644
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -114,7 +114,6 @@ static void freecookie(struct Cookie *co)
+ free(co->name);
+ free(co->value);
+ free(co->maxage);
+- free(co->version);
+ free(co);
+ }
+
+@@ -641,11 +640,7 @@ Curl_cookie_add(struct Curl_easy *data,
+ }
+ }
+ else if(strcasecompare("version", name)) {
+- strstore(&co->version, whatptr);
+- if(!co->version) {
+- badcookie = TRUE;
+- break;
+- }
++ /* just ignore */
+ }
+ else if(strcasecompare("max-age", name)) {
+ /* Defined in RFC2109:
+@@ -1042,7 +1037,6 @@ Curl_cookie_add(struct Curl_easy *data,
+ free(clist->path);
+ free(clist->spath);
+ free(clist->expirestr);
+- free(clist->version);
+ free(clist->maxage);
+
+ *clist = *co; /* then store all the new data */
+@@ -1111,9 +1105,6 @@ struct CookieInfo *Curl_cookie_init(struct Curl_easy *data,
+ c = calloc(1, sizeof(struct CookieInfo));
+ if(!c)
+ return NULL; /* failed to get memory */
+- c->filename = strdup(file?file:"none"); /* copy the name just in case */
+- if(!c->filename)
+- goto fail; /* failed to get memory */
+ }
+ else {
+ /* we got an already existing one, use that */
+@@ -1241,7 +1232,6 @@ static struct Cookie *dup_cookie(struct Cookie *src)
+ CLONE(name);
+ CLONE(value);
+ CLONE(maxage);
+- CLONE(version);
+ d->expires = src->expires;
+ d->tailmatch = src->tailmatch;
+ d->secure = src->secure;
+@@ -1457,7 +1447,6 @@ void Curl_cookie_cleanup(struct CookieInfo *c)
+ {
+ if(c) {
+ unsigned int i;
+- free(c->filename);
+ for(i = 0; i < COOKIE_HASH_SIZE; i++)
+ Curl_cookie_freelist(c->cookies[i]);
+ free(c); /* free the base struct as well */
+diff --git a/lib/cookie.h b/lib/cookie.h
+index b3865e601..2e667cda0 100644
+--- a/lib/cookie.h
++++ b/lib/cookie.h
+@@ -36,8 +36,6 @@ struct Cookie {
+ char *expirestr; /* the plain text version */
+ bool tailmatch; /* whether we do tail-matching of the domain name */
+
+- /* RFC 2109 keywords. Version=1 means 2109-compliant cookie sending */
+- char *version; /* Version = <value> */
+ char *maxage; /* Max-Age = <value> */
+
+ bool secure; /* whether the 'secure' keyword was used */
+@@ -54,15 +52,14 @@ struct Cookie {
+ #define COOKIE_PREFIX__SECURE (1<<0)
+ #define COOKIE_PREFIX__HOST (1<<1)
+
+-#define COOKIE_HASH_SIZE 256
++#define COOKIE_HASH_SIZE 63
+
+ struct CookieInfo {
+ /* linked list of cookies we know of */
+ struct Cookie *cookies[COOKIE_HASH_SIZE];
+
+- char *filename; /* file we read from/write to */
+ bool running; /* state info, for cookie adding information */
+- long numcookies; /* number of cookies in the "jar" */
++ int numcookies; /* number of cookies in the "jar" */
+ bool newsession; /* new session, discard session cookies on load */
+ int lastct; /* last creation-time used in the jar */
+ };
+diff --git a/lib/easy.c b/lib/easy.c
+index b648e80c1..cdca0fb03 100644
+--- a/lib/easy.c
++++ b/lib/easy.c
+@@ -840,9 +840,7 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data)
+ if(data->cookies) {
+ /* If cookies are enabled in the parent handle, we enable them
+ in the clone as well! */
+- outcurl->cookies = Curl_cookie_init(data,
+- data->cookies->filename,
+- outcurl->cookies,
++ outcurl->cookies = Curl_cookie_init(data, NULL, outcurl->cookies,
+ data->set.cookiesession);
+ if(!outcurl->cookies)
+ goto fail;
+--
+2.39.2
+
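
The patch above drops the per-jar filename and version fields and shrinks COOKIE_HASH_SIZE from 256 to 63 buckets; the table size only acts as a modulus over the domain hash, so the trade is memory per easy handle against slightly longer bucket chains. A toy illustration of that (cookie_bucket is not curl's real hash function):

    #include <stdio.h>

    #define COOKIE_HASH_SIZE 63   /* bucket count after the patch; was 256 */

    /* Toy bucket selection: shrinking the table saves memory per handle
     * while only lengthening the average bucket chain slightly. */
    static unsigned int cookie_bucket(const char *domain)
    {
        unsigned int h = 5381;
        for (; *domain; domain++)
            h = h * 33 + (unsigned char)*domain;
        return h % COOKIE_HASH_SIZE;
    }

    int main(void)
    {
        printf("%u\n", cookie_bucket("example.com"));
        return 0;
    }
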
diff --git a/meta/recipes-support/curl/curl/CVE-2023-46218.patch b/meta/recipes-support/curl/curl/CVE-2023-46218.patch
new file mode 100644
index 0000000000..c9677b6a84
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2023-46218.patch
@@ -0,0 +1,52 @@
+CVE: CVE-2023-46218
+Upstream-Status: Backport [ import from ubuntu http://archive.ubuntu.com/ubuntu/pool/main/c/curl/curl_7.68.0-1ubuntu2.21.debian.tar.xz upstream https://github.com/curl/curl/commit/2b0994c29a721c91c57 ]
+Signed-off-by: Lee Chee Yang <chee.yang.lee@intel.com>
+
+Backport of:
+
+From 2b0994c29a721c91c572cff7808c572a24d251eb Mon Sep 17 00:00:00 2001
+From: Daniel Stenberg <daniel@haxx.se>
+Date: Thu, 23 Nov 2023 08:15:47 +0100
+Subject: [PATCH] cookie: lowercase the domain names before PSL checks
+
+Reported-by: Harry Sintonen
+
+Closes #12387
+---
+ lib/cookie.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/lib/cookie.c
++++ b/lib/cookie.c
+@@ -967,15 +967,23 @@ Curl_cookie_add(struct Curl_easy *data,
+ #ifdef USE_LIBPSL
+ /* Check if the domain is a Public Suffix and if yes, ignore the cookie. */
+ if(domain && co->domain && !isip(co->domain)) {
+- const psl_ctx_t *psl = Curl_psl_use(data);
+- int acceptable;
+-
+- if(psl) {
+- acceptable = psl_is_cookie_domain_acceptable(psl, domain, co->domain);
+- Curl_psl_release(data);
++ bool acceptable = FALSE;
++ char lcase[256];
++ char lcookie[256];
++ size_t dlen = strlen(domain);
++ size_t clen = strlen(co->domain);
++ if((dlen < sizeof(lcase)) && (clen < sizeof(lcookie))) {
++ const psl_ctx_t *psl = Curl_psl_use(data);
++ if(psl) {
++ /* the PSL check requires lowercase domain name and pattern */
++ Curl_strntolower(lcase, domain, dlen + 1);
++ Curl_strntolower(lcookie, co->domain, clen + 1);
++ acceptable = psl_is_cookie_domain_acceptable(psl, lcase, lcookie);
++ Curl_psl_release(data);
++ }
++ else
++ acceptable = !bad_domain(domain);
+ }
+- else
+- acceptable = !bad_domain(domain);
+
+ if(!acceptable) {
+ infof(data, "cookie '%s' dropped, domain '%s' must not "
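
The replacement code above lowercases both the request domain and the cookie domain into fixed 256-byte buffers before asking libpsl whether the cookie domain is acceptable, and skips the PSL call when either name does not fit. A small sketch of the lowercase-copy step (lowercase_into is an illustrative helper, not curl's Curl_strntolower):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy 'src' into the fixed-size buffer 'dst' in lower case.
     * Returns 0 if the name does not fit, in which case the caller
     * skips the PSL check, mirroring the patch above. */
    static int lowercase_into(char *dst, size_t cap, const char *src)
    {
        size_t len = strlen(src);
        size_t i;
        if (len >= cap)
            return 0;
        for (i = 0; i < len; i++)
            dst[i] = (char)tolower((unsigned char)src[i]);
        dst[len] = '\0';
        return 1;
    }

    int main(void)
    {
        char buf[256];
        if (lowercase_into(buf, sizeof(buf), "WWW.Example.COM"))
            printf("%s\n", buf);            /* www.example.com */
        return 0;
    }

Normalising case on the caller's side matters because, as the patch comment notes, the PSL check requires lowercase domain name and pattern.
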
diff --git a/meta/recipes-support/curl/curl/CVE-2024-2398.patch b/meta/recipes-support/curl/curl/CVE-2024-2398.patch
new file mode 100644
index 0000000000..a3840336f0
--- /dev/null
+++ b/meta/recipes-support/curl/curl/CVE-2024-2398.patch
@@ -0,0 +1,88 @@
+Backport of:
+
+From deca8039991886a559b67bcd6701db800a5cf764 Mon Sep 17 00:00:00 2001
+From: Stefan Eissing <stefan@eissing.org>
+Date: Wed, 6 Mar 2024 09:36:08 +0100
+Subject: [PATCH] http2: push headers better cleanup
+
+- provide common cleanup method for push headers
+
+Closes #13054
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/curl/tree/debian/patches/CVE-2024-2398.patch?h=ubuntu/focal-security
+Upstream commit https://github.com/curl/curl/commit/deca8039991886a559b67bcd6701db800a5cf764]
+CVE: CVE-2024-2398
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/http2.c | 34 +++++++++++++++-------------------
+ 1 file changed, 15 insertions(+), 19 deletions(-)
+
+--- a/lib/http2.c
++++ b/lib/http2.c
+@@ -515,6 +515,15 @@ static struct Curl_easy *duphandle(struc
+ }
+
+
++static void free_push_headers(struct HTTP *stream)
++{
++ size_t i;
++ for(i = 0; i<stream->push_headers_used; i++)
++ free(stream->push_headers[i]);
++ Curl_safefree(stream->push_headers);
++ stream->push_headers_used = 0;
++}
++
+ static int push_promise(struct Curl_easy *data,
+ struct connectdata *conn,
+ const nghttp2_push_promise *frame)
+@@ -528,7 +537,6 @@ static int push_promise(struct Curl_easy
+ struct curl_pushheaders heads;
+ CURLMcode rc;
+ struct http_conn *httpc;
+- size_t i;
+ /* clone the parent */
+ struct Curl_easy *newhandle = duphandle(data);
+ if(!newhandle) {
+@@ -557,11 +565,7 @@ static int push_promise(struct Curl_easy
+ Curl_set_in_callback(data, false);
+
+ /* free the headers again */
+- for(i = 0; i<stream->push_headers_used; i++)
+- free(stream->push_headers[i]);
+- free(stream->push_headers);
+- stream->push_headers = NULL;
+- stream->push_headers_used = 0;
++ free_push_headers(stream);
+
+ if(rv) {
+ /* denied, kill off the new handle again */
+@@ -995,10 +999,10 @@ static int on_header(nghttp2_session *se
+ stream->push_headers_alloc) {
+ char **headp;
+ stream->push_headers_alloc *= 2;
+- headp = Curl_saferealloc(stream->push_headers,
+- stream->push_headers_alloc * sizeof(char *));
++ headp = realloc(stream->push_headers,
++ stream->push_headers_alloc * sizeof(char *));
+ if(!headp) {
+- stream->push_headers = NULL;
++ free_push_headers(stream);
+ return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
+ }
+ stream->push_headers = headp;
+@@ -1179,14 +1183,7 @@ void Curl_http2_done(struct Curl_easy *d
+ if(http->header_recvbuf) {
+ Curl_add_buffer_free(&http->header_recvbuf);
+ Curl_add_buffer_free(&http->trailer_recvbuf);
+- if(http->push_headers) {
+- /* if they weren't used and then freed before */
+- for(; http->push_headers_used > 0; --http->push_headers_used) {
+- free(http->push_headers[http->push_headers_used - 1]);
+- }
+- free(http->push_headers);
+- http->push_headers = NULL;
+- }
++ free_push_headers(http);
+ }
+
+ if(!httpc->h2) /* not HTTP/2 ? */
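
The refactor above routes every exit path through one free_push_headers() helper so the header array, its entries, and the used counter are always reset together. The same shape in a standalone sketch (header_list and header_list_free are hypothetical names):

    #include <stdlib.h>
    #include <string.h>

    struct header_list {
        char **items;
        size_t used;
        size_t alloc;
    };

    /* One cleanup routine used on every exit path: free each element,
     * free the array, and reset the counters so a second call is harmless. */
    static void header_list_free(struct header_list *hl)
    {
        size_t i;
        for (i = 0; i < hl->used; i++)
            free(hl->items[i]);
        free(hl->items);
        hl->items = NULL;
        hl->used = hl->alloc = 0;
    }

    int main(void)
    {
        struct header_list hl = { NULL, 0, 0 };
        hl.items = malloc(2 * sizeof(char *));
        if (hl.items) {
            hl.items[0] = strdup(":path");
            hl.items[1] = strdup(":method");
            hl.used = hl.alloc = 2;
        }
        header_list_free(&hl);
        header_list_free(&hl);   /* second call is a harmless no-op */
        return 0;
    }

Resetting the pointer and counters inside the helper is what makes a later failure path, or a repeated call, safe.
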
diff --git a/meta/recipes-support/curl/curl_7.69.1.bb b/meta/recipes-support/curl/curl_7.69.1.bb
index bc1b993e9e..2f351d585a 100644
--- a/meta/recipes-support/curl/curl_7.69.1.bb
+++ b/meta/recipes-support/curl/curl_7.69.1.bb
@@ -25,6 +25,40 @@ SRC_URI = "https://curl.haxx.se/download/curl-${PV}.tar.bz2 \
file://CVE-2021-22946-pre1.patch \
file://CVE-2021-22946.patch \
file://CVE-2021-22947.patch \
+ file://CVE-2022-27776.patch \
+ file://CVE-2022-27775.patch \
+ file://CVE-2022-22576.patch \
+ file://CVE-2022-27774-1.patch \
+ file://CVE-2022-27774-2.patch \
+ file://CVE-2022-27774-3.patch \
+ file://CVE-2022-27774-4.patch \
+ file://CVE-2022-27781.patch \
+ file://CVE-2022-27782-1.patch \
+ file://CVE-2022-27782-2.patch \
+ file://CVE-2022-32206.patch \
+ file://CVE-2022-32207.patch \
+ file://CVE-2022-32208.patch \
+ file://CVE-2022-35252.patch \
+ file://CVE-2022-32221.patch \
+ file://CVE-2022-35260.patch \
+ file://CVE-2022-43552.patch \
+ file://CVE-2023-23916.patch \
+ file://CVE-2023-27534-pre1.patch \
+ file://CVE-2023-27534.patch \
+ file://CVE-2023-27538.patch \
+ file://CVE-2023-27533.patch \
+ file://CVE-2023-27535-pre1.patch \
+ file://CVE-2023-27535.patch \
+ file://CVE-2023-27536.patch \
+ file://CVE-2023-28320.patch \
+ file://CVE-2023-28320-fol1.patch \
+ file://CVE-2023-32001.patch \
+ file://CVE-2023-38545.patch \
+ file://CVE-2023-38546.patch \
+ file://CVE-2023-28321.patch \
+ file://CVE-2023-28322.patch \
+ file://CVE-2023-46218.patch \
+ file://CVE-2024-2398.patch \
"
SRC_URI[md5sum] = "ec5fc263f898a3dfef08e805f1ecca42"
@@ -32,13 +66,16 @@ SRC_URI[sha256sum] = "2ff5e5bd507adf6aa88ff4bbafd4c7af464867ffb688be93b9930717a5
# Curl has used many names over the years...
CVE_PRODUCT = "haxx:curl haxx:libcurl curl:curl curl:libcurl libcurl:libcurl daniel_stenberg:curl"
-CVE_CHECK_WHITELIST = "CVE-2021-22922 CVE-2021-22923 CVE-2021-22926 CVE-22945"
+CVE_CHECK_WHITELIST = "CVE-2021-22922 CVE-2021-22923 CVE-2021-22926 CVE-2021-22945"
# As per link https://security-tracker.debian.org/tracker/CVE-2021-22897
# and https://ubuntu.com/security/CVE-2021-22897
# This CVE issue affects Windows only Hence whitelisting this CVE
CVE_CHECK_WHITELIST += "CVE-2021-22897"
+# This CVE reports that Apple had to upgrade curl because of other already reported CVEs
+CVE_CHECK_WHITELIST += "CVE-2023-42915"
+
inherit autotools pkgconfig binconfig multilib_header
PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} gnutls libidn proxy threaded-resolver verbose zlib"
diff --git a/meta/recipes-support/gnupg/gnupg/CVE-2022-34903.patch b/meta/recipes-support/gnupg/gnupg/CVE-2022-34903.patch
new file mode 100644
index 0000000000..5992949d35
--- /dev/null
+++ b/meta/recipes-support/gnupg/gnupg/CVE-2022-34903.patch
@@ -0,0 +1,44 @@
+From 2f05fc96b1332caf97176841b1152da3f0aa16a8 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 22 Jul 2022 17:52:36 +0530
+Subject: [PATCH] CVE-2022-34903
+
+Upstream-Status: Backport [https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=commit;h=34c649b3601383cd11dbc76221747ec16fd68e1b]
+CVE: CVE-2022-34903
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ g10/cpr.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/g10/cpr.c b/g10/cpr.c
+index d502e8b..bc4b715 100644
+--- a/g10/cpr.c
++++ b/g10/cpr.c
+@@ -328,20 +328,15 @@ write_status_text_and_buffer (int no, const char *string,
+ }
+ first = 0;
+ }
+- for (esc=0, s=buffer, n=len; n && !esc; s++, n--)
++ for (esc=0, s=buffer, n=len; n; s++, n--)
+ {
+ if (*s == '%' || *(const byte*)s <= lower_limit
+ || *(const byte*)s == 127 )
+ esc = 1;
+ if (wrap && ++count > wrap)
+- {
+- dowrap=1;
+- break;
+- }
+- }
+- if (esc)
+- {
+- s--; n++;
++ dowrap=1;
++ if (esc || dowrap)
++ break;
+ }
+ if (s != buffer)
+ es_fwrite (buffer, s-buffer, 1, statusfp);
+--
+2.25.1
+
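
The rewritten loop above scans the status buffer in a single pass, stopping either at the first byte that needs escaping or once the wrap limit is reached, with no post-loop pointer rollback (the rollback is what the fix removes). A rough sketch of that scan, with 0x1f standing in for gnupg's lower_limit:

    #include <stdio.h>
    #include <string.h>

    /* Return how many leading bytes of 'buf' can be written verbatim,
     * stopping at the first byte that needs escaping or once 'wrap'
     * bytes have been counted. */
    static size_t verbatim_prefix(const unsigned char *buf, size_t len,
                                  size_t wrap, int *need_escape)
    {
        size_t i;
        *need_escape = 0;
        for (i = 0; i < len; i++) {
            if (buf[i] == '%' || buf[i] <= 0x1f || buf[i] == 127) {
                *need_escape = 1;
                break;
            }
            if (wrap && i + 1 > wrap)
                break;
        }
        return i;
    }

    int main(void)
    {
        const unsigned char msg[] = "status %data";
        int esc;
        size_t n = verbatim_prefix(msg, strlen((const char *)msg), 0, &esc);
        printf("prefix=%zu escape=%d\n", n, esc);   /* prefix=7 escape=1 */
        return 0;
    }
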
diff --git a/meta/recipes-support/gnupg/gnupg_2.2.27.bb b/meta/recipes-support/gnupg/gnupg_2.2.27.bb
index 18bb855769..bd09b02017 100644
--- a/meta/recipes-support/gnupg/gnupg_2.2.27.bb
+++ b/meta/recipes-support/gnupg/gnupg_2.2.27.bb
@@ -20,6 +20,7 @@ SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
file://0003-dirmngr-uses-libgpg-error.patch \
file://0004-autogen.sh-fix-find-version-for-beta-checking.patch \
file://0001-Woverride-init-is-not-needed-with-gcc-9.patch \
+ file://CVE-2022-34903.patch \
"
SRC_URI_append_class-native = " file://0001-configure.ac-use-a-custom-value-for-the-location-of-.patch \
file://relocate.patch"
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2021-4209.patch b/meta/recipes-support/gnutls/gnutls/CVE-2021-4209.patch
new file mode 100644
index 0000000000..0bcb55e573
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2021-4209.patch
@@ -0,0 +1,37 @@
+From 3db352734472d851318944db13be73da61300568 Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Wed, 22 Dec 2021 09:12:25 +0100
+Subject: [PATCH] wrap_nettle_hash_fast: avoid calling _update with zero-length
+ input
+
+As Nettle's hash update functions internally call memcpy, providing
+zero-length input may cause undefined behavior.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+https://gitlab.com/gnutls/gnutls/-/commit/3db352734472d851318944db13be73da61300568
+Upstream-Status: Backport
+CVE: CVE-2021-4209
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ lib/nettle/mac.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/lib/nettle/mac.c b/lib/nettle/mac.c
+index f9d4d7a8df..35e070fab0 100644
+--- a/lib/nettle/mac.c
++++ b/lib/nettle/mac.c
+@@ -788,7 +788,9 @@ static int wrap_nettle_hash_fast(gnutls_digest_algorithm_t algo,
+ if (ret < 0)
+ return gnutls_assert_val(ret);
+
+- ctx.update(&ctx, text_size, text);
++ if (text_size > 0) {
++ ctx.update(&ctx, text_size, text);
++ }
+ ctx.digest(&ctx, ctx.length, digest);
+
+ return 0;
+--
+GitLab
+
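
As the commit message above notes, Nettle's hash update functions call memcpy() internally, so passing a zero-length (and possibly NULL) input is undefined behaviour; the patch simply skips the update in that case. A toy model of the guard (toy_hash/toy_update are illustrative, not Nettle's API, and do no bounds checking):

    #include <stdio.h>
    #include <string.h>

    struct toy_hash { unsigned char buf[64]; size_t len; };

    /* Same contract as a Nettle update: copies into an internal buffer,
     * so calling it with a NULL pointer is undefined even for length 0. */
    static void toy_update(struct toy_hash *h, size_t len, const unsigned char *p)
    {
        memcpy(h->buf + h->len, p, len);
        h->len += len;
    }

    static void hash_data(struct toy_hash *h, const unsigned char *p, size_t len)
    {
        if (len > 0)          /* the guard added by the patch */
            toy_update(h, len, p);
    }

    int main(void)
    {
        struct toy_hash h = { {0}, 0 };
        hash_data(&h, NULL, 0);            /* safe: update is skipped */
        hash_data(&h, (const unsigned char *)"abc", 3);
        printf("%zu\n", h.len);            /* 3 */
        return 0;
    }
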
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch b/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch
new file mode 100644
index 0000000000..f8954945d0
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2022-2509.patch
@@ -0,0 +1,282 @@
+From 9835638d4e1f37781a47e777c76d5bb14218929b Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Tue, 16 Aug 2022 12:23:14 +0530
+Subject: [PATCH] CVE-2022-2509
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/ce37f9eb265dbe9b6d597f5767449e8ee95848e2]
+CVE: CVE-2022-2509
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ NEWS | 4 +
+ lib/x509/pkcs7.c | 3 +-
+ tests/Makefile.am | 2 +-
+ tests/pkcs7-verify-double-free.c | 215 +++++++++++++++++++++++++++++++
+ 4 files changed, 222 insertions(+), 2 deletions(-)
+ create mode 100644 tests/pkcs7-verify-double-free.c
+
+diff --git a/NEWS b/NEWS
+index 755a67c..ba70bb3 100644
+--- a/NEWS
++++ b/NEWS
+@@ -7,6 +7,10 @@ See the end for copying conditions.
+
+ * Version 3.6.14 (released 2020-06-03)
+
++** libgnutls: Fixed double free during verification of pkcs7 signatures.
++ Reported by Jaak Ristioja (#1383). [GNUTLS-SA-2022-07-07, CVSS: medium]
++ [CVE-2022-2509]
++
+ ** libgnutls: Fixed insecure session ticket key construction, since 3.6.4.
+ The TLS server would not bind the session ticket encryption key with a
+ value supplied by the application until the initial key rotation, allowing
+diff --git a/lib/x509/pkcs7.c b/lib/x509/pkcs7.c
+index 98669e8..ccbc69d 100644
+--- a/lib/x509/pkcs7.c
++++ b/lib/x509/pkcs7.c
+@@ -1318,7 +1318,8 @@ gnutls_x509_crt_t find_signer(gnutls_pkcs7_t pkcs7, gnutls_x509_trust_list_t tl,
+ issuer = find_verified_issuer_of(pkcs7, issuer, purpose, vflags);
+
+ if (issuer != NULL && gnutls_x509_crt_check_issuer(issuer, issuer)) {
+- if (prev) gnutls_x509_crt_deinit(prev);
++ if (prev && prev != signer)
++ gnutls_x509_crt_deinit(prev);
+ prev = issuer;
+ break;
+ }
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index 11a083c..cd43a0f 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -219,7 +219,7 @@ ctests += mini-record-2 simple gnutls_hmac_fast set_pkcs12_cred cert certuniquei
+ tls-record-size-limit-asym dh-compute ecdh-compute sign-verify-data-newapi \
+ sign-verify-newapi sign-verify-deterministic iov aead-cipher-vec \
+ tls13-without-timeout-func buffer status-request-revoked \
+- set_x509_ocsp_multi_cli kdf-api keylog-func \
++ set_x509_ocsp_multi_cli kdf-api keylog-func pkcs7-verify-double-free \
+ dtls_hello_random_value tls_hello_random_value x509cert-dntypes
+
+ if HAVE_SECCOMP_TESTS
+diff --git a/tests/pkcs7-verify-double-free.c b/tests/pkcs7-verify-double-free.c
+new file mode 100644
+index 0000000..fadf307
+--- /dev/null
++++ b/tests/pkcs7-verify-double-free.c
+@@ -0,0 +1,215 @@
++/*
++ * Copyright (C) 2022 Red Hat, Inc.
++ *
++ * Author: Zoltan Fridrich
++ *
++ * This file is part of GnuTLS.
++ *
++ * GnuTLS is free software: you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * GnuTLS is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with GnuTLS. If not, see <https://www.gnu.org/licenses/>.
++ */
++
++#ifdef HAVE_CONFIG_H
++#include <config.h>
++#endif
++
++#include <stdio.h>
++#include <gnutls/pkcs7.h>
++#include <gnutls/x509.h>
++
++#include "utils.h"
++
++static char rca_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDCjCCAfKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQKDApFeGFt\n"
++ "cGxlIENBMCAXDTE3MDcyMTE0NDMzNloYDzIyMjIwNzIxMTQ0MzM2WjAVMRMwEQYD\n"
++ "VQQKDApFeGFtcGxlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\n"
++ "v8hnKPJ/IA0SQB/A/a0Uh+npZ67vsgIMrtTQo0r0kJkmkBz5323xO3DVuJfB3QmX\n"
++ "v9zvoeCQLuDvWar5Aixfxgm6s5Q+yPvJj9t3NebDrU+Y4+qyewBIJUF8EF/5iBPC\n"
++ "ZHONmzbfIRWvQWGGgb2CRcOHp2J7AY/QLB6LsWPaLjs/DHva28Q13JaTTHIpdu8v\n"
++ "t6vHr0nXf66DN4MvtoF3N+o+v3snJCMsfXOqASi4tbWR7gtOfCfiz9uBjh0W2Dut\n"
++ "/jclBQkJkLe6esNSM+f4YiOpctVDjmfj8yoHCp394vt0wFqhG38wsTFAyVP6qIcf\n"
++ "5zoSu9ovEt2cTkhnZHjiiwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud\n"
++ "DwEB/wQEAwIBBjAdBgNVHQ4EFgQUhjeO6Uc5imbjOl2I2ltVA27Hu9YwHwYDVR0j\n"
++ "BBgwFoAUhjeO6Uc5imbjOl2I2ltVA27Hu9YwDQYJKoZIhvcNAQELBQADggEBAD+r\n"
++ "i/7FsbG0OFKGF2+JOnth6NjJQcMfM8LiglqAuBUijrv7vltoZ0Z3FJH1Vi4OeMXn\n"
++ "l7X/9tWUve0uFl75MfjDrf0+lCEdYRY1LCba2BrUgpbbkLywVUdnbsvndehegCgS\n"
++ "jss2/zys3Hlo3ZaHlTMQ/NQ4nrxcxkjOvkZSEOqgxJTLpzm6pr7YUts4k6c6lNiB\n"
++ "FSiJiDzsJCmWR9C3fBbUlfDfTJYGN3JwqX270KchXDElo8gNoDnF7jBMpLFFSEKm\n"
++ "MyfbNLX/srh+CEfZaN/OZV4A3MQ0L8vQEp6M4CJhvRLIuMVabZ2coJ0AzystrOMU\n"
++ "LirBWjg89RoAjFQ7bTE=\n"
++ "-----END CERTIFICATE-----\n";
++
++static char ca_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDFzCCAf+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQKDApFeGFt\n"
++ "cGxlIENBMCAXDTE3MDcyMTE0NDQzNFoYDzIyMjIwNzIxMTQ0NDM0WjAiMSAwHgYD\n"
++ "VQQKDBdFeGFtcGxlIGludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZIhvcNAQEBBQAD\n"
++ "ggEPADCCAQoCggEBAKb9ACB8u//sP6MfNU1OsVw68xz3eTPLgKxS0vpqexm6iGVg\n"
++ "ug/o9uYRLzqiEukv/eyz9WzHmY7sqlOJjOFdv92+SaNg79Jc51WHPFXgea4/qyfr\n"
++ "4y14PGs0SNxm6T44sXurUs7cXydQVUgnq2VCaWFOTUdxXoAWkV8r8GaUoPD/klVz\n"
++ "RqxSZVETmX1XBKhsMnnov41kRwVph2C+VfUspsbaUZaz/o/S1/nokhXRACzKsMBr\n"
++ "obqiGxbY35uVzsmbAW5ErhQz98AWJL3Bub1fsEMXg6OEMmPH4AtX888dTIYZNw0E\n"
++ "bUIESspz1kjJQTtVQDHTprhwz16YiSVeUonlLgMCAwEAAaNjMGEwDwYDVR0TAQH/\n"
++ "BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPBjxDWjMhjXERirKF9O\n"
++ "o/5Cllc5MB8GA1UdIwQYMBaAFIY3julHOYpm4zpdiNpbVQNux7vWMA0GCSqGSIb3\n"
++ "DQEBCwUAA4IBAQCTm+vv3hBa6lL5IT+Fw8aTxQ2Ne7mZ5oyazhvXYwwfKNMX3SML\n"
++ "W2JdPaL64ZwbxxxYvW401o5Z0CEgru3YFrsqB/hEdl0Uf8UWWJmE1rRa+miTmbjt\n"
++ "lrLNCWdrs6CiwvsPITTHg7jevB4KyZYsTSxQFcyr3N3xF+6EmOTC4IkhPPnXYXcp\n"
++ "248ih+WOavSYoRvzgB/Dip1WnPYU2mfIV3O8JReRryngA0TzWCLPLUoWR3R4jwtC\n"
++ "+1uSLoqaenz3qv3F1WEbke37az9YJuXx/5D8CqFQiZ62TUUtI6fYd8mkMBM4Qfh6\n"
++ "NW9XrCkI9wlpL5K9HllhuW0BhKeJkuPpyQ2p\n"
++ "-----END CERTIFICATE-----\n";
++
++static char ee_pem[] =
++ "-----BEGIN CERTIFICATE-----\n"
++ "MIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQsFADAiMSAwHgYDVQQKDBdFeGFt\n"
++ "cGxlIGludGVybWVkaWF0ZSBDQTAgFw0yMjA3MjExNDQ1MzdaGA8yMjIyMDcyMTE0\n"
++ "NDUzN1owFTETMBEGA1UEAwwKSm9obiBTbWl0aDCCASIwDQYJKoZIhvcNAQEBBQAD\n"
++ "ggEPADCCAQoCggEBAMb1uuxppBFY+WVD45iyHUq7DkIJNNOI/JRaybVJfPktWq2E\n"
++ "eNe7XhV05KKnqZTbDO2iYqNHqGhZ8pz/IstDRTZP3z/q1vXTG0P9Gx28rEy5TaUY\n"
++ "QjtD+ZoFUQm0ORMDBjd8jikqtJ87hKeuOPMH4rzdydotMaPQSm7KLzHBGBr6gg7z\n"
++ "g1IxPWkhMyHapoMqqrhjwjzoTY97UIXpZTEoIA+KpEC8f9CciBtL0i1MPBjWozB6\n"
++ "Jma9q5iEwZXuRr3cnPYeIPlK2drgDZCMuSFcYiT8ApLw5OhKqY1m2EvfZ2ox2s9R\n"
++ "68/HzYdPi3kZwiNEtlBvMlpt5yKBJAflp76d7DkCAwEAAaNuMGwwCwYDVR0PBAQD\n"
++ "AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDAdBgNVHQ4EFgQUc+Mi\n"
++ "kr8WMCk00SQo+P2iggp/oQkwHwYDVR0jBBgwFoAU8GPENaMyGNcRGKsoX06j/kKW\n"
++ "VzkwDQYJKoZIhvcNAQELBQADggEBAKU9+CUR0Jcfybd1+8Aqgh1RH96yQygnVuyt\n"
++ "Na9rFz4fM3ij9tGXDHXrkZw8bW1dWLU9quu8zeTxKxc3aiDIw739Alz0tukttDo7\n"
++ "dW7YqIb77zsIsWB9p7G9dlxT6ieUy+5IKk69BbeK8KR0vAciAG4KVQxPhuPy/LGX\n"
++ "PzqlJIJ4h61s3UOroReHPB1keLZgpORqrvtpClOmABH9TLFRJA/WFg8Q2XYB/p0x\n"
++ "l/pWiaoBC+8wK9cDoMUK5yOwXeuCLffCb+UlAD0+z/qxJ2pisE8E9X8rRKRrWI+i\n"
++ "G7LtJCEn86EQK8KuRlJxKgj8lClZhoULB0oL4jbblBuNow9WRmM=\n"
++ "-----END CERTIFICATE-----\n";
++
++static char msg_pem[] =
++ "-----BEGIN PKCS7-----\n"
++ "MIIK2QYJKoZIhvcNAQcCoIIKyjCCCsYCAQExDTALBglghkgBZQMEAgEwCwYJKoZI\n"
++ "hvcNAQcBoIIJTzCCAwowggHyoAMCAQICAQEwDQYJKoZIhvcNAQELBQAwFTETMBEG\n"
++ "A1UECgwKRXhhbXBsZSBDQTAgFw0xNzA3MjExNDQzMjFaGA8yMjIyMDcyMTE0NDMy\n"
++ "MVowFTETMBEGA1UECgwKRXhhbXBsZSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP\n"
++ "ADCCAQoCggEBAL51eyE4j8wAKQKMGlO9HEY2iaGvsdPSJmidSdmCi1jnNK39Lx4Y\n"
++ "31h279hSHF5wtI6VM91HHfeLf1mjEZHlKrXXJQzBPLpbHWapD778drHBitOP8e56\n"
++ "fDMIfofLV4tkMk8690vPe4cJH1UHGspMyz6EQF9kPRaW80XtMV/6dalgL/9Esmaw\n"
++ "XBNPJAS1VutDuXQkJ/3/rWFLmkpYHHtGPjX782YRmT1s+VOVTsLqmKx0TEL8A381\n"
++ "bbElHPUAMjPcyWR5qqA8KWnS5Dwqk3LwI0AvuhQytCq0S7Xl4DXauvxwTRXv0UU7\n"
++ "W8r3MLAw9DnlnJiD/RFjw5rbGO3wMePk/qUCAwEAAaNjMGEwDwYDVR0TAQH/BAUw\n"
++ "AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFIh2KRoKJoe2VtpOwWMkRAkR\n"
++ "mLWKMB8GA1UdIwQYMBaAFIh2KRoKJoe2VtpOwWMkRAkRmLWKMA0GCSqGSIb3DQEB\n"
++ "CwUAA4IBAQBovvlOjoy0MCT5U0eWfcPQQjY4Ssrn3IiPNlVkqSNo+FHX+2baTLVQ\n"
++ "5QTHxwXwzdIJiwtjFWDdGEQXqmuIvnFG+u/whGbeg6oQygfnQ5Y+q6epOxCsPgLQ\n"
++ "mKKEaF7mvh8DauUx4QSbYCNGCctOZuB1vlN9bJ3/5QbH+2pFPOfCr5CAyPDwHo6S\n"
++ "qO3yPcutRwT9xS7gXEHM9HhLp+DmdCGh4eVBPiFilyZm1d92lWxU8oxoSfXgzDT/\n"
++ "GCzlMykNZNs4JD9QmiRClP/3U0dQbOhah/Fda+N+L90xaqEgGcvwKKZa3pzo59pl\n"
++ "BbkcIP4YPyHeinwkgAn5UVJg9DOxNCS0MIIDFzCCAf+gAwIBAgIBAjANBgkqhkiG\n"
++ "9w0BAQsFADAVMRMwEQYDVQQKDApFeGFtcGxlIENBMCAXDTE3MDcyMTE0NDQxM1oY\n"
++ "DzIyMjIwNzIxMTQ0NDEzWjAiMSAwHgYDVQQKDBdFeGFtcGxlIGludGVybWVkaWF0\n"
++ "ZSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMPFDEvDANwvhviu\n"
++ "pwXTvaKyxyX94jVu1wgAhIRyQBVRiMbrn8MEufLG8oA0vKd8s92gv/lWe1jFb2rn\n"
++ "91jMkZWsjWjiJFD6SzqFfBo+XxOGikEqO1MAf92UqavmSGlXVRG1Vy7T7dWibZP0\n"
++ "WODhHYWayR0Y6owSz5IqNfrHXzDME+lSJxHgRFI7pK+b0OgiVmvyXDKFPvyU6GrP\n"
++ "lxXDi/XbjyPvC5gpiwtTgm+s8KERwmdlfZUNjkh2PpHx1g1joijHT3wIvO/Pek1E\n"
++ "C+Xs6w3XxGgL6TTL7FDuv4AjZVX9KK66/yBhX3aN8bkqAg+hs9XNk3zzWC0XEFOS\n"
++ "Qoh2va0CAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw\n"
++ "HQYDVR0OBBYEFHwi/7dUWGjkMWJctOm7MCjjQj1cMB8GA1UdIwQYMBaAFIh2KRoK\n"
++ "Joe2VtpOwWMkRAkRmLWKMA0GCSqGSIb3DQEBCwUAA4IBAQCF6sHCBdYRwBwvfCve\n"
++ "og9cPnmPqZrG4AtmSvtoSsMvgvKb/4z3/gG8oPtTBkeRcAHoMoEp/oA+B2ylwIAc\n"
++ "S5U7jx+lYH/Pqih0X/OcOLbaMv8uzGSGQxk+L9LuuIT6E/THfRRIPEvkDkzC+/uk\n"
++ "7vUbG17bSEWeF0o/6sjzAY2aH1jnbCDyu0UC78GXkc6bZ5QlH98uLMDMrOmqcZjS\n"
++ "JFfvuRDQyKV5yBdBkYaobsIWSQDsgYxJzf/2y8c3r+HXqT+jhrXPWJ3btgMPxpu7\n"
++ "E8KmoFgp9EM+48oYlXJ66rk08/KjaVmgN7R+Hm3e2+MFT2kme4fBKalLjcazTe3x\n"
++ "0FisMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQsFADAiMSAwHgYDVQQKDBdF\n"
++ "eGFtcGxlIGludGVybWVkaWF0ZSBDQTAgFw0yMjA3MjExNDQ1MzBaGA8yMjIyMDcy\n"
++ "MTE0NDUzMVowFTETMBEGA1UEAwwKSm9obiBTbWl0aDCCASIwDQYJKoZIhvcNAQEB\n"
++ "BQADggEPADCCAQoCggEBAMjhSqhdD5RjmOm6W3hG7zkgKBP9whRN/SipcdEMlkgc\n"
++ "F/U3QMu66qIfKwheNdWalC1JLtruLDWP92ysa6Vw+CCG8aSax1AgB//RKQB7kgPA\n"
++ "9js9hi/oCdBmCv2HJxhWSLz+MVoxgzW4C7S9FenI+btxe/99Uw4nOw7kwjsYDLKr\n"
++ "tMw8myv7aCW/63CuBYGtohiZupM3RI3kKFcZots+KRPLlZpjv+I2h9xSln8VxKNb\n"
++ "XiMrYwGfHB7iX7ghe1TvFjKatEUhsqa7AvIq7nfe/cyq97f0ODQO814njgZtk5iQ\n"
++ "JVavXHdhTVaypt1HdAFMuHX5UATylHxx9tRCgSIijUsCAwEAAaNuMGwwCwYDVR0P\n"
++ "BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDAdBgNVHQ4EFgQU\n"
++ "31+vHl4E/2Jpnwinbzf+d7usshcwHwYDVR0jBBgwFoAUfCL/t1RYaOQxYly06bsw\n"
++ "KONCPVwwDQYJKoZIhvcNAQELBQADggEBAAWe63DcNwmleQ3INFGDJZ/m2I/R/cBa\n"
++ "nnrxgR5Ey1ljHdA/x1z1JLTGmGVwqGExs5DNG9Q//Pmc9pZ1yPa8J4Xf8AvFcmkY\n"
++ "mWoH1HvW0xu/RF1UN5SAoD2PRQ+Vq4OSPD58IlEu/u4o1wZV7Wl91Cv6VNpiAb63\n"
++ "j9PA1YacOpOtcRqG59Vuj9HFm9f30ejHVo2+KJcpo290cR3Zg4fOm8mtjeMdt/QS\n"
++ "Atq+RqPAQ7yxqvEEv8zPIZj2kAOQm3mh/yYqBrR68lQUD/dBTP7ApIZkhUK3XK6U\n"
++ "nf9JvoF6Fn2+Cnqb//FLBgHSnoeqeQNwDLUXTsD02iYxHzJrhokSY4YxggFQMIIB\n"
++ "TAIBATAnMCIxIDAeBgNVBAoMF0V4YW1wbGUgaW50ZXJtZWRpYXRlIENBAgEBMAsG\n"
++ "CWCGSAFlAwQCATANBgkqhkiG9w0BAQEFAASCAQATHg6wNsBcs/Ub1GQfKwTpKCk5\n"
++ "8QXuNnZ0u7b6mKgrSY2Gf47fpL2aRgaR+BAQncbctu5EH/IL38pWjaGtOhFAj/5q\n"
++ "7luVQW11kuyJN3Bd/dtLqawWOwMmAIEigw6X50l5ZHnEVzFfxt+RKTNhk4XWVtbi\n"
++ "2iIlITOplW0rnvxYAwCxKL9ocaB7etK8au7ixMxbFp75Ts4iLX8dhlAFdCuFCk8k\n"
++ "B8mi9HHuwr3QYRqMPW61hu1wBL3yB8eoZNOwPXb0gkIh6ZvgptxgQzm/cc+Iw9fP\n"
++ "QkR0fTM7ElJ5QZmSV98AUbZDHmDvpmcjcUxfSPMc3IoT8T300usRu7QHqKJi\n"
++ "-----END PKCS7-----\n";
++
++const gnutls_datum_t rca_datum = { (void *)rca_pem, sizeof(rca_pem) - 1 };
++const gnutls_datum_t ca_datum = { (void *)ca_pem, sizeof(ca_pem) - 1 };
++const gnutls_datum_t ee_datum = { (void *)ee_pem, sizeof(ee_pem) - 1 };
++const gnutls_datum_t msg_datum = { (void *)msg_pem, sizeof(msg_pem) - 1 };
++
++static void tls_log_func(int level, const char *str)
++{
++ fprintf(stderr, "%s |<%d>| %s", "err", level, str);
++}
++
++#define CHECK(X)\
++{\
++ r = X;\
++ if (r < 0)\
++ fail("error in %d: %s\n", __LINE__, gnutls_strerror(r));\
++}\
++
++void doit(void)
++{
++ int r;
++ gnutls_x509_crt_t rca_cert = NULL;
++ gnutls_x509_crt_t ca_cert = NULL;
++ gnutls_x509_crt_t ee_cert = NULL;
++ gnutls_x509_trust_list_t tlist = NULL;
++ gnutls_pkcs7_t pkcs7 = NULL;
++ gnutls_datum_t data = { (unsigned char *)"xxx", 3 };
++
++ if (debug) {
++ gnutls_global_set_log_function(tls_log_func);
++ gnutls_global_set_log_level(4711);
++ }
++
++ // Import certificates
++ CHECK(gnutls_x509_crt_init(&rca_cert));
++ CHECK(gnutls_x509_crt_import(rca_cert, &rca_datum, GNUTLS_X509_FMT_PEM));
++ CHECK(gnutls_x509_crt_init(&ca_cert));
++ CHECK(gnutls_x509_crt_import(ca_cert, &ca_datum, GNUTLS_X509_FMT_PEM));
++ CHECK(gnutls_x509_crt_init(&ee_cert));
++ CHECK(gnutls_x509_crt_import(ee_cert, &ee_datum, GNUTLS_X509_FMT_PEM));
++
++ // Setup trust store
++ CHECK(gnutls_x509_trust_list_init(&tlist, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, rca_cert, "rca", 3, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, ca_cert, "ca", 2, 0));
++ CHECK(gnutls_x509_trust_list_add_named_crt(tlist, ee_cert, "ee", 2, 0));
++
++ // Setup pkcs7 structure
++ CHECK(gnutls_pkcs7_init(&pkcs7));
++ CHECK(gnutls_pkcs7_import(pkcs7, &msg_datum, GNUTLS_X509_FMT_PEM));
++
++ // Signature verification
++ gnutls_pkcs7_verify(pkcs7, tlist, NULL, 0, 0, &data, 0);
++
++ gnutls_x509_crt_deinit(rca_cert);
++ gnutls_x509_crt_deinit(ca_cert);
++ gnutls_x509_crt_deinit(ee_cert);
++ gnutls_x509_trust_list_deinit(tlist, 0);
++ gnutls_pkcs7_deinit(pkcs7);
++}
+--
+2.25.1
+
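
The one-line fix above keeps find_signer() from calling gnutls_x509_crt_deinit() on a certificate that is still reachable through the signer pointer, which is where the double free came from. The ownership guard in isolation, as a hedged sketch (struct cert and release_prev are made-up names):

    #include <stdio.h>
    #include <stdlib.h>

    struct cert { int id; };

    /* Release the previously held issuer unless it is the same object as
     * the signer -- the guard the patch adds before deinitialising 'prev'. */
    static void release_prev(struct cert **prev, struct cert *signer)
    {
        if (*prev && *prev != signer)
            free(*prev);
        *prev = NULL;
    }

    int main(void)
    {
        struct cert *signer = calloc(1, sizeof(*signer));
        struct cert *prev = signer;        /* chain walking starts at the signer */
        if (!signer)
            return 1;

        release_prev(&prev, signer);       /* no-op: prev aliases signer */
        free(signer);                      /* freed exactly once */
        puts("ok");
        return 0;
    }
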
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch b/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch
new file mode 100644
index 0000000000..943f4ca704
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2023-0361.patch
@@ -0,0 +1,85 @@
+From 80a6ce8ddb02477cd724cd5b2944791aaddb702a Mon Sep 17 00:00:00 2001
+From: Alexander Sosedkin <asosedkin@redhat.com>
+Date: Tue, 9 Aug 2022 16:05:53 +0200
+Subject: [PATCH] auth/rsa: side-step potential side-channel
+
+Signed-off-by: Alexander Sosedkin <asosedkin@redhat.com>
+Signed-off-by: Hubert Kario <hkario@redhat.com>
+Tested-by: Hubert Kario <hkario@redhat.com>
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/80a6ce8ddb02477cd724cd5b2944791aaddb702a
+ https://gitlab.com/gnutls/gnutls/-/commit/4b7ff428291c7ed77c6d2635577c83a43bbae558]
+CVE: CVE-2023-0361
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/auth/rsa.c | 30 +++---------------------------
+ 1 file changed, 3 insertions(+), 27 deletions(-)
+
+diff --git a/lib/auth/rsa.c b/lib/auth/rsa.c
+index 8108ee8..858701f 100644
+--- a/lib/auth/rsa.c
++++ b/lib/auth/rsa.c
+@@ -155,13 +155,10 @@ static int
+ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ size_t _data_size)
+ {
+- const char attack_error[] = "auth_rsa: Possible PKCS #1 attack\n";
+ gnutls_datum_t ciphertext;
+ int ret, dsize;
+ ssize_t data_size = _data_size;
+ volatile uint8_t ver_maj, ver_min;
+- volatile uint8_t check_ver_min;
+- volatile uint32_t ok;
+
+ #ifdef ENABLE_SSL3
+ if (get_num_version(session) == GNUTLS_SSL3) {
+@@ -187,7 +184,6 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+
+ ver_maj = _gnutls_get_adv_version_major(session);
+ ver_min = _gnutls_get_adv_version_minor(session);
+- check_ver_min = (session->internals.allow_wrong_pms == 0);
+
+ session->key.key.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
+ if (session->key.key.data == NULL) {
+@@ -206,10 +202,9 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ return ret;
+ }
+
+- ret =
+- gnutls_privkey_decrypt_data2(session->internals.selected_key,
+- 0, &ciphertext, session->key.key.data,
+- session->key.key.size);
++ gnutls_privkey_decrypt_data2(session->internals.selected_key,
++ 0, &ciphertext, session->key.key.data,
++ session->key.key.size);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+ * channel that can be used as an oracle, so treat very carefully */
+@@ -225,25 +220,6 @@ proc_rsa_client_kx(gnutls_session_t session, uint8_t * data,
+ * Vlastimil Klima, Ondej Pokorny and Tomas Rosa.
+ */
+
+- /* ok is 0 in case of error and 1 in case of success. */
+-
+- /* if ret < 0 */
+- ok = CONSTCHECK_EQUAL(ret, 0);
+- /* session->key.key.data[0] must equal ver_maj */
+- ok &= CONSTCHECK_EQUAL(session->key.key.data[0], ver_maj);
+- /* if check_ver_min then session->key.key.data[1] must equal ver_min */
+- ok &= CONSTCHECK_NOT_EQUAL(check_ver_min, 0) &
+- CONSTCHECK_EQUAL(session->key.key.data[1], ver_min);
+-
+- if (ok) {
+- /* call logging function unconditionally so all branches are
+- * indistinguishable for timing and cache access when debug
+- * logging is disabled */
+- _gnutls_no_log("%s", attack_error);
+- } else {
+- _gnutls_debug_log("%s", attack_error);
+- }
+-
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+--
+2.25.1
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch b/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch
new file mode 100644
index 0000000000..c518cfa0ac
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2023-5981.patch
@@ -0,0 +1,206 @@
+Backport of:
+
+From 29d6298d0b04cfff970b993915db71ba3f580b6d Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Mon, 23 Oct 2023 09:26:57 +0900
+Subject: [PATCH] auth/rsa_psk: side-step potential side-channel
+
+This removes branching that depends on secret data, porting changes
+for regular RSA key exchange from
+4804febddc2ed958e5ae774de2a8f85edeeff538 and
+80a6ce8ddb02477cd724cd5b2944791aaddb702a. This also removes the
+allow_wrong_pms as it was used sorely to control debug output
+depending on the branching.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+Upstream-Status: Backport [https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/gnutls28/3.6.13-2ubuntu1.9/gnutls28_3.6.13-2ubuntu1.9.debian.tar.xz
+Upstream-Commit: https://gitlab.com/gnutls/gnutls/-/commit/29d6298d0b04cfff970b993915db71ba3f580b6d]
+CVE: CVE-2023-5981
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/auth/rsa.c | 2 +-
+ lib/auth/rsa_psk.c | 90 ++++++++++++++++++----------------------------
+ lib/gnutls_int.h | 4 ---
+ lib/priority.c | 1 -
+ 4 files changed, 35 insertions(+), 62 deletions(-)
+
+--- a/lib/auth/rsa.c
++++ b/lib/auth/rsa.c
+@@ -207,7 +207,7 @@ proc_rsa_client_kx(gnutls_session_t sess
+ session->key.key.size);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+- * channel that can be used as an oracle, so treat very carefully */
++ * channel that can be used as an oracle, so tread carefully */
+
+ /* Error handling logic:
+ * In case decryption fails then don't inform the peer. Just use the
+--- a/lib/auth/rsa_psk.c
++++ b/lib/auth/rsa_psk.c
+@@ -264,14 +264,13 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_se
+ {
+ gnutls_datum_t username;
+ psk_auth_info_t info;
+- gnutls_datum_t plaintext;
+ gnutls_datum_t ciphertext;
+ gnutls_datum_t pwd_psk = { NULL, 0 };
+ int ret, dsize;
+- int randomize_key = 0;
+ ssize_t data_size = _data_size;
+ gnutls_psk_server_credentials_t cred;
+ gnutls_datum_t premaster_secret = { NULL, 0 };
++ volatile uint8_t ver_maj, ver_min;
+
+ cred = (gnutls_psk_server_credentials_t)
+ _gnutls_get_cred(session, GNUTLS_CRD_PSK);
+@@ -327,71 +326,47 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_se
+ }
+ ciphertext.size = dsize;
+
+- ret =
+- gnutls_privkey_decrypt_data(session->internals.selected_key, 0,
+- &ciphertext, &plaintext);
+- if (ret < 0 || plaintext.size != GNUTLS_MASTER_SIZE) {
+- /* In case decryption fails then don't inform
+- * the peer. Just use a random key. (in order to avoid
+- * attack against pkcs-1 formatting).
+- */
+- gnutls_assert();
+- _gnutls_debug_log
+- ("auth_rsa_psk: Possible PKCS #1 format attack\n");
+- if (ret >= 0) {
+- gnutls_free(plaintext.data);
+- }
+- randomize_key = 1;
+- } else {
+- /* If the secret was properly formatted, then
+- * check the version number.
+- */
+- if (_gnutls_get_adv_version_major(session) !=
+- plaintext.data[0]
+- || (session->internals.allow_wrong_pms == 0
+- && _gnutls_get_adv_version_minor(session) !=
+- plaintext.data[1])) {
+- /* No error is returned here, if the version number check
+- * fails. We proceed normally.
+- * That is to defend against the attack described in the paper
+- * "Attacking RSA-based sessions in SSL/TLS" by Vlastimil Klima,
+- * Ondej Pokorny and Tomas Rosa.
+- */
+- gnutls_assert();
+- _gnutls_debug_log
+- ("auth_rsa: Possible PKCS #1 version check format attack\n");
+- }
+- }
++ ver_maj = _gnutls_get_adv_version_major(session);
++ ver_min = _gnutls_get_adv_version_minor(session);
+
++ premaster_secret.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
++ if (premaster_secret.data == NULL) {
++ gnutls_assert();
++ return GNUTLS_E_MEMORY_ERROR;
++ }
++ premaster_secret.size = GNUTLS_MASTER_SIZE;
+
+- if (randomize_key != 0) {
+- premaster_secret.size = GNUTLS_MASTER_SIZE;
+- premaster_secret.data =
+- gnutls_malloc(premaster_secret.size);
+- if (premaster_secret.data == NULL) {
+- gnutls_assert();
+- return GNUTLS_E_MEMORY_ERROR;
+- }
+-
+- /* we do not need strong random numbers here.
+- */
+- ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
+- premaster_secret.size);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+- } else {
+- premaster_secret.data = plaintext.data;
+- premaster_secret.size = plaintext.size;
++ /* Fallback value when decryption fails. Needs to be unpredictable. */
++ ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
++ premaster_secret.size);
++ if (ret < 0) {
++ gnutls_assert();
++ goto cleanup;
+ }
+
++ gnutls_privkey_decrypt_data2(session->internals.selected_key, 0,
++ &ciphertext, premaster_secret.data,
++ premaster_secret.size);
++ /* After this point, any conditional on failure that cause differences
++ * in execution may create a timing or cache access pattern side
++ * channel that can be used as an oracle, so tread carefully */
++
++ /* Error handling logic:
++ * In case decryption fails then don't inform the peer. Just use the
++ * random key previously generated. (in order to avoid attack against
++ * pkcs-1 formatting).
++ *
++ * If we get version mismatches no error is returned either. We
++ * proceed normally. This is to defend against the attack described
++ * in the paper "Attacking RSA-based sessions in SSL/TLS" by
++ * Vlastimil Klima, Ondej Pokorny and Tomas Rosa.
++ */
++
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+-
+- premaster_secret.data[0] = _gnutls_get_adv_version_major(session);
+- premaster_secret.data[1] = _gnutls_get_adv_version_minor(session);
++ premaster_secret.data[0] = ver_maj;
++ premaster_secret.data[1] = ver_min;
+
+ /* find the key of this username
+ */
+--- a/lib/gnutls_int.h
++++ b/lib/gnutls_int.h
+@@ -989,7 +989,6 @@ struct gnutls_priority_st {
+ bool _no_etm;
+ bool _no_ext_master_secret;
+ bool _allow_key_usage_violation;
+- bool _allow_wrong_pms;
+ bool _dumbfw;
+ unsigned int _dh_prime_bits; /* old (deprecated) variable */
+
+@@ -1007,7 +1006,6 @@ struct gnutls_priority_st {
+ (x)->no_etm = 1; \
+ (x)->no_ext_master_secret = 1; \
+ (x)->allow_key_usage_violation = 1; \
+- (x)->allow_wrong_pms = 1; \
+ (x)->dumbfw = 1
+
+ #define ENABLE_PRIO_COMPAT(x) \
+@@ -1016,7 +1014,6 @@ struct gnutls_priority_st {
+ (x)->_no_etm = 1; \
+ (x)->_no_ext_master_secret = 1; \
+ (x)->_allow_key_usage_violation = 1; \
+- (x)->_allow_wrong_pms = 1; \
+ (x)->_dumbfw = 1
+
+ /* DH and RSA parameters types.
+@@ -1141,7 +1138,6 @@ typedef struct {
+ bool no_etm;
+ bool no_ext_master_secret;
+ bool allow_key_usage_violation;
+- bool allow_wrong_pms;
+ bool dumbfw;
+
+ /* old (deprecated) variable. This is used for both srp_prime_bits
+--- a/lib/priority.c
++++ b/lib/priority.c
+@@ -681,7 +681,6 @@ gnutls_priority_set(gnutls_session_t ses
+ COPY_TO_INTERNALS(no_etm);
+ COPY_TO_INTERNALS(no_ext_master_secret);
+ COPY_TO_INTERNALS(allow_key_usage_violation);
+- COPY_TO_INTERNALS(allow_wrong_pms);
+ COPY_TO_INTERNALS(dumbfw);
+ COPY_TO_INTERNALS(dh_prime_bits);
+
diff --git a/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch b/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch
new file mode 100644
index 0000000000..f15c470879
--- /dev/null
+++ b/meta/recipes-support/gnutls/gnutls/CVE-2024-0553.patch
@@ -0,0 +1,125 @@
+From 40dbbd8de499668590e8af51a15799fbc430595e Mon Sep 17 00:00:00 2001
+From: Daiki Ueno <ueno@gnu.org>
+Date: Wed, 10 Jan 2024 19:13:17 +0900
+Subject: [PATCH] rsa-psk: minimize branching after decryption
+
+This moves any non-trivial code between gnutls_privkey_decrypt_data2
+and the function return in _gnutls_proc_rsa_psk_client_kx up until the
+decryption. This also avoids an extra memcpy to session->key.key.
+
+Signed-off-by: Daiki Ueno <ueno@gnu.org>
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls/-/commit/40dbbd8de499668590e8af51a15799fbc430595e]
+CVE: CVE-2024-0553
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ lib/auth/rsa_psk.c | 68 ++++++++++++++++++++++++----------------------
+ 1 file changed, 35 insertions(+), 33 deletions(-)
+
+diff --git a/lib/auth/rsa_psk.c b/lib/auth/rsa_psk.c
+index 93c2dc9..c6cfb92 100644
+--- a/lib/auth/rsa_psk.c
++++ b/lib/auth/rsa_psk.c
+@@ -269,7 +269,6 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ int ret, dsize;
+ ssize_t data_size = _data_size;
+ gnutls_psk_server_credentials_t cred;
+- gnutls_datum_t premaster_secret = { NULL, 0 };
+ volatile uint8_t ver_maj, ver_min;
+
+ cred = (gnutls_psk_server_credentials_t)
+@@ -329,24 +328,48 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ ver_maj = _gnutls_get_adv_version_major(session);
+ ver_min = _gnutls_get_adv_version_minor(session);
+
+- premaster_secret.data = gnutls_malloc(GNUTLS_MASTER_SIZE);
+- if (premaster_secret.data == NULL) {
++ /* Find the key of this username. A random value will be
++ * filled in if the key is not found.
++ */
++ ret = _gnutls_psk_pwd_find_entry(session, info->username,
++ strlen(info->username), &pwd_psk);
++ if (ret < 0)
++ return gnutls_assert_val(ret);
++
++ /* Allocate memory for premaster secret, and fill in the
++ * fields except the decryption result.
++ */
++ session->key.key.size = 2 + GNUTLS_MASTER_SIZE + 2 + pwd_psk.size;
++ session->key.key.data = gnutls_malloc(session->key.key.size);
++ if (session->key.key.data == NULL) {
+ gnutls_assert();
++ _gnutls_free_key_datum(&pwd_psk);
++ /* No need to zeroize, as the secret is not copied in yet */
++ _gnutls_free_datum(&session->key.key);
+ return GNUTLS_E_MEMORY_ERROR;
+ }
+- premaster_secret.size = GNUTLS_MASTER_SIZE;
+
+ /* Fallback value when decryption fails. Needs to be unpredictable. */
+- ret = gnutls_rnd(GNUTLS_RND_NONCE, premaster_secret.data,
+- premaster_secret.size);
++ ret = gnutls_rnd(GNUTLS_RND_NONCE, session->key.key.data + 2,
++ GNUTLS_MASTER_SIZE);
+ if (ret < 0) {
+ gnutls_assert();
+- goto cleanup;
++ _gnutls_free_key_datum(&pwd_psk);
++ /* No need to zeroize, as the secret is not copied in yet */
++ _gnutls_free_datum(&session->key.key);
++ return ret;
+ }
+
++ _gnutls_write_uint16(GNUTLS_MASTER_SIZE, session->key.key.data);
++ _gnutls_write_uint16(pwd_psk.size,
++ &session->key.key.data[2 + GNUTLS_MASTER_SIZE]);
++ memcpy(&session->key.key.data[2 + GNUTLS_MASTER_SIZE + 2], pwd_psk.data,
++ pwd_psk.size);
++ _gnutls_free_key_datum(&pwd_psk);
++
+ gnutls_privkey_decrypt_data2(session->internals.selected_key, 0,
+- &ciphertext, premaster_secret.data,
+- premaster_secret.size);
++ &ciphertext, session->key.key.data + 2,
++ GNUTLS_MASTER_SIZE);
+ /* After this point, any conditional on failure that cause differences
+ * in execution may create a timing or cache access pattern side
+ * channel that can be used as an oracle, so tread carefully */
+@@ -365,31 +388,10 @@ _gnutls_proc_rsa_psk_client_kx(gnutls_session_t session, uint8_t * data,
+ /* This is here to avoid the version check attack
+ * discussed above.
+ */
+- premaster_secret.data[0] = ver_maj;
+- premaster_secret.data[1] = ver_min;
++ session->key.key.data[2] = ver_maj;
++ session->key.key.data[3] = ver_min;
+
+- /* find the key of this username
+- */
+- ret =
+- _gnutls_psk_pwd_find_entry(session, info->username, strlen(info->username), &pwd_psk);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+-
+- ret =
+- set_rsa_psk_session_key(session, &pwd_psk, &premaster_secret);
+- if (ret < 0) {
+- gnutls_assert();
+- goto cleanup;
+- }
+-
+- ret = 0;
+- cleanup:
+- _gnutls_free_key_datum(&pwd_psk);
+- _gnutls_free_temp_key_datum(&premaster_secret);
+-
+- return ret;
++ return 0;
+ }
+
+ static int
+--
+2.25.1
+
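Editor's sketch: the two gnutls patches above apply the standard countermeasure against Bleichenbacher-style padding oracles in RSA-PSK key exchange: pre-fill the premaster secret with random bytes, decrypt over that same buffer, never branch on the decryption result, and overwrite the version bytes unconditionally. The standalone C sketch below only illustrates that pattern; rnd_bytes() and pkcs1_decrypt_into() are made-up stand-ins for gnutls_rnd() and gnutls_privkey_decrypt_data2(), not the real gnutls API.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MASTER_SIZE 48

    /* Stand-ins for gnutls_rnd() and gnutls_privkey_decrypt_data2(); both
     * are assumptions made for this sketch, not the real gnutls API. */
    static int rnd_bytes(uint8_t *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            buf[i] = (uint8_t)rand();          /* placeholder entropy only */
        return 0;
    }

    static int pkcs1_decrypt_into(const uint8_t *ct, size_t ct_len,
                                  uint8_t *out, size_t out_len)
    {
        (void)ct; (void)ct_len; (void)out; (void)out_len;
        return -1;                             /* pretend the padding was bad */
    }

    /* The pattern both patches converge on: fill the buffer with an
     * unpredictable fallback first, decrypt over it, never branch on the
     * result, and force the version bytes unconditionally. */
    static int derive_premaster(const uint8_t *ct, size_t ct_len,
                                uint8_t ver_maj, uint8_t ver_min,
                                uint8_t out[MASTER_SIZE])
    {
        if (rnd_bytes(out, MASTER_SIZE) != 0)
            return -1;                         /* fail closed without entropy */
        (void)pkcs1_decrypt_into(ct, ct_len, out, MASTER_SIZE);
        out[0] = ver_maj;                      /* defeats the version oracle */
        out[1] = ver_min;
        return 0;
    }

    int main(void)
    {
        uint8_t pms[MASTER_SIZE], ct[16] = { 0 };
        derive_premaster(ct, sizeof(ct), 3, 3, pms);
        printf("premaster begins %02x %02x\n", pms[0], pms[1]);
        return 0;
    }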
diff --git a/meta/recipes-support/gnutls/gnutls_3.6.14.bb b/meta/recipes-support/gnutls/gnutls_3.6.14.bb
index 0c68da7c54..a1451daf2c 100644
--- a/meta/recipes-support/gnutls/gnutls_3.6.14.bb
+++ b/meta/recipes-support/gnutls/gnutls_3.6.14.bb
@@ -25,6 +25,11 @@ SRC_URI = "https://www.gnupg.org/ftp/gcrypt/gnutls/v${SHRT_VER}/gnutls-${PV}.tar
file://CVE-2020-24659.patch \
file://CVE-2021-20231.patch \
file://CVE-2021-20232.patch \
+ file://CVE-2022-2509.patch \
+ file://CVE-2021-4209.patch \
+ file://CVE-2023-0361.patch \
+ file://CVE-2023-5981.patch \
+ file://CVE-2024-0553.patch \
"
SRC_URI[sha256sum] = "5630751adec7025b8ef955af4d141d00d252a985769f51b4059e5affa3d39d63"
diff --git a/meta/recipes-support/gnutls/libtasn1/CVE-2021-46848.patch b/meta/recipes-support/gnutls/libtasn1/CVE-2021-46848.patch
new file mode 100644
index 0000000000..9a8ceecbe7
--- /dev/null
+++ b/meta/recipes-support/gnutls/libtasn1/CVE-2021-46848.patch
@@ -0,0 +1,45 @@
+From 22fd12b290adea788122044cb58dc9e77754644f Mon Sep 17 00:00:00 2001
+From: Vivek Kumbhar <vkumbhar@mvista.com>
+Date: Thu, 17 Nov 2022 12:07:50 +0530
+Subject: [PATCH] CVE-2021-46848
+
+Upstream-Status: Backport [https://gitlab.com/gnutls/libtasn1/-/commit/44a700d2051a666235748970c2df047ff207aeb5]
+CVE: CVE-2021-46848
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+
+Fix ETYPE_OK off by one array size check.
+---
+ NEWS | 4 ++++
+ lib/int.h | 2 +-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/NEWS b/NEWS
+index f042481..d8f684e 100644
+--- a/NEWS
++++ b/NEWS
+@@ -1,5 +1,9 @@
+ GNU Libtasn1 NEWS -*- outline -*-
+
++* Noteworthy changes in release ?.? (????-??-??) [?]
++- Fix ETYPE_OK out of bounds read. Closes: #32.
++- Update gnulib files and various maintenance fixes.
++
+ * Noteworthy changes in release 4.16.0 (released 2020-02-01) [stable]
+ - asn1_decode_simple_ber: added support for constructed definite
+ octet strings. This allows this function decode the whole set of
+diff --git a/lib/int.h b/lib/int.h
+index ea16257..c877282 100644
+--- a/lib/int.h
++++ b/lib/int.h
+@@ -97,7 +97,7 @@ typedef struct tag_and_class_st
+ #define ETYPE_TAG(etype) (_asn1_tags[etype].tag)
+ #define ETYPE_CLASS(etype) (_asn1_tags[etype].class)
+ #define ETYPE_OK(etype) (((etype) != ASN1_ETYPE_INVALID && \
+- (etype) <= _asn1_tags_size && \
++ (etype) < _asn1_tags_size && \
+ _asn1_tags[(etype)].desc != NULL)?1:0)
+
+ #define ETYPE_IS_STRING(etype) ((etype == ASN1_ETYPE_GENERALSTRING || \
+--
+2.25.1
+
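Editor's sketch: the libtasn1 change above is a one-character off-by-one fix; for a table with _asn1_tags_size entries the largest valid index is _asn1_tags_size - 1, so the guard must use "<" rather than "<=". A minimal illustration with placeholder names (not the libtasn1 internals):

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative table; the names are placeholders, not libtasn1 symbols. */
    static const int tag_table[] = { 10, 20, 30, 40 };
    static const size_t tag_table_size = sizeof(tag_table) / sizeof(tag_table[0]);

    /* Valid indexes are 0 .. tag_table_size - 1, so the guard must use '<'.
     * With '<=', idx == tag_table_size slips through and the caller reads
     * one element past the end of the array. */
    static int tag_index_ok(size_t idx)
    {
        return idx < tag_table_size;
    }

    int main(void)
    {
        printf("index 3 ok: %d\n", tag_index_ok(3));   /* 1: last valid entry */
        printf("index 4 ok: %d\n", tag_index_ok(4));   /* 0: one past the end */
        return 0;
    }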
diff --git a/meta/recipes-support/gnutls/libtasn1_4.16.0.bb b/meta/recipes-support/gnutls/libtasn1_4.16.0.bb
index 8d3a14506a..d2b3c492ec 100644
--- a/meta/recipes-support/gnutls/libtasn1_4.16.0.bb
+++ b/meta/recipes-support/gnutls/libtasn1_4.16.0.bb
@@ -12,6 +12,7 @@ LIC_FILES_CHKSUM = "file://doc/COPYING;md5=d32239bcb673463ab874e80d47fae504 \
SRC_URI = "${GNU_MIRROR}/libtasn1/libtasn1-${PV}.tar.gz \
file://dont-depend-on-help2man.patch \
+ file://CVE-2021-46848.patch \
"
DEPENDS = "bison-native"
diff --git a/meta/recipes-support/libbsd/libbsd_0.10.0.bb b/meta/recipes-support/libbsd/libbsd_0.10.0.bb
index 5b32b9af41..58925738cb 100644
--- a/meta/recipes-support/libbsd/libbsd_0.10.0.bb
+++ b/meta/recipes-support/libbsd/libbsd_0.10.0.bb
@@ -29,6 +29,12 @@ HOMEPAGE = "https://libbsd.freedesktop.org/wiki/"
# License: public-domain-Colin-Plumb
LICENSE = "BSD-3-Clause & BSD-4-Clause & ISC & PD"
LICENSE_${PN} = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-dbg = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-dev = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-doc = "BSD-3-Clause & BSD-4-Clause & ISC & PD"
+LICENSE:${PN}-locale = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-src = "BSD-3-Clause & ISC & PD"
+LICENSE:${PN}-staticdev = "BSD-3-Clause & ISC & PD"
LIC_FILES_CHKSUM = "file://COPYING;md5=2120be0173469a06ed185b688e0e1ae0"
SECTION = "libs"
diff --git a/meta/recipes-support/libcap/files/CVE-2023-2602.patch b/meta/recipes-support/libcap/files/CVE-2023-2602.patch
new file mode 100644
index 0000000000..ca04d7297a
--- /dev/null
+++ b/meta/recipes-support/libcap/files/CVE-2023-2602.patch
@@ -0,0 +1,52 @@
+Backport of:
+
+From bc6b36682f188020ee4770fae1d41bde5b2c97bb Mon Sep 17 00:00:00 2001
+From: "Andrew G. Morgan" <morgan@kernel.org>
+Date: Wed, 3 May 2023 19:18:36 -0700
+Subject: Correct the check of pthread_create()'s return value.
+
+This function returns a positive number (errno) on error, so the code
+wasn't previously freeing some memory in this situation.
+
+Discussion:
+
+ https://stackoverflow.com/a/3581020/14760867
+
+Credit for finding this bug in libpsx goes to David Gstir of
+X41 D-Sec GmbH (https://x41-dsec.de/) who performed a security
+audit of the libcap source code in April of 2023. The audit
+was sponsored by the Open Source Technology Improvement Fund
+(https://ostif.org/).
+
+Audit ref: LCAP-CR-23-01 (CVE-2023-2602)
+
+Signed-off-by: Andrew G. Morgan <morgan@kernel.org>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libcap2/tree/debian/patches/CVE-2023-2602.patch?h=ubuntu/focal-security
+Upstream commit https://git.kernel.org/pub/scm/libs/libcap/libcap.git/commit/?id=bc6b36682f188020ee4770fae1d41bde5b2c97bb]
+CVE: CVE-2023-2602
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ psx/psx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/libcap/psx.c
++++ b/libcap/psx.c
+@@ -272,7 +272,7 @@ int psx_pthread_create(pthread_t *thread
+
+ psx_wait_for_idle();
+ int ret = pthread_create(thread, attr, start_routine, arg);
+- if (ret != -1) {
++ if (ret == 0) {
+ psx_do_registration(*thread);
+ }
+ psx_resume_idle();
+@@ -287,7 +287,7 @@ int __wrap_pthread_create(pthread_t *thr
+ void *(*start_routine) (void *), void *arg) {
+ psx_wait_for_idle();
+ int ret = __real_pthread_create(thread, attr, start_routine, arg);
+- if (ret != -1) {
++ if (ret == 0) {
+ psx_do_registration(*thread);
+ }
+ psx_resume_idle();
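Editor's sketch: as the commit message above notes, pthread_create() reports failure by returning a positive errno value and never returns -1, so the only reliable success test is ret == 0. A small self-contained example of the correct pattern (generic code, not the libpsx wrapper):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static void *worker(void *arg)
    {
        (void)arg;
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        int ret = pthread_create(&tid, NULL, worker, NULL);

        /* pthread_create() returns 0 on success or an errno value (e.g.
         * EAGAIN) on failure; testing for -1 treats every failure as
         * success, which is exactly the bug the patch fixes. */
        if (ret == 0) {
            pthread_join(tid, NULL);
        } else {
            fprintf(stderr, "pthread_create: %s\n", strerror(ret));
            return 1;
        }
        return 0;
    }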
diff --git a/meta/recipes-support/libcap/files/CVE-2023-2603.patch b/meta/recipes-support/libcap/files/CVE-2023-2603.patch
new file mode 100644
index 0000000000..cf86ac2a46
--- /dev/null
+++ b/meta/recipes-support/libcap/files/CVE-2023-2603.patch
@@ -0,0 +1,58 @@
+Backport of:
+
+From 422bec25ae4a1ab03fd4d6f728695ed279173b18 Mon Sep 17 00:00:00 2001
+From: "Andrew G. Morgan" <morgan@kernel.org>
+Date: Wed, 3 May 2023 19:44:22 -0700
+Subject: Large strings can confuse libcap's internal strdup code.
+
+Avoid something subtle with really long strings: 1073741823 should
+be enough for anybody. This is an improved fix over something attempted
+in libcap-2.55 to address some static analysis findings.
+
+Reviewing the library, cap_proc_root() and cap_launcher_set_chroot()
+are the only two calls where the library is potentially exposed to a
+user controlled string input.
+
+Credit for finding this bug in libcap goes to Richard Weinberger of
+X41 D-Sec GmbH (https://x41-dsec.de/) who performed a security audit
+of the libcap source code in April of 2023. The audit was sponsored
+by the Open Source Technology Improvement Fund (https://ostif.org/).
+
+Audit ref: LCAP-CR-23-02 (CVE-2023-2603)
+
+Signed-off-by: Andrew G. Morgan <morgan@kernel.org>
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/libcap2/tree/debian/patches/CVE-2023-2603.patch?h=ubuntu/focal-security
+Upstream commit https://git.kernel.org/pub/scm/libs/libcap/libcap.git/commit/?id=422bec25ae4a1ab03fd4d6f728695ed279173b18]
+CVE: CVE-2023-2603
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ libcap/cap_alloc.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/libcap/cap_alloc.c
++++ b/libcap/cap_alloc.c
+@@ -76,13 +76,22 @@ cap_t cap_init(void)
+ char *_libcap_strdup(const char *old)
+ {
+ __u32 *raw_data;
++ size_t len;
+
+ if (old == NULL) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+- raw_data = malloc( sizeof(__u32) + strlen(old) + 1 );
++ len = strlen(old);
++ if ((len & 0x3fffffff) != len) {
++ _cap_debug("len is too long for libcap to manage");
++ errno = EINVAL;
++ return NULL;
++ }
++ len += sizeof(__u32) + 1;
++
++ raw_data = malloc(len);
+ if (raw_data == NULL) {
+ errno = ENOMEM;
+ return NULL;
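Editor's sketch: the CVE-2023-2603 fix bounds the input length before the size arithmetic so that sizeof(__u32) + strlen(old) + 1 cannot wrap. A generic version of the same defensive strdup pattern; the function name is illustrative and the 0x3fffffff cap simply mirrors the patch:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Duplicate a string behind a small length header, refusing inputs long
     * enough to make the size arithmetic wrap. Generic sketch, not libcap
     * code. */
    static char *bounded_strdup(const char *old)
    {
        if (old == NULL) {
            errno = EINVAL;
            return NULL;
        }
        size_t len = strlen(old);
        if ((len & 0x3fffffff) != len) {      /* reject absurdly long inputs */
            errno = EINVAL;
            return NULL;
        }
        len += sizeof(uint32_t) + 1;          /* header + terminating NUL */

        uint32_t *raw = malloc(len);
        if (raw == NULL) {
            errno = ENOMEM;
            return NULL;
        }
        *raw = (uint32_t)(len - sizeof(uint32_t));
        char *copy = (char *)(raw + 1);
        memcpy(copy, old, len - sizeof(uint32_t));  /* copies the NUL too */
        return copy;
    }

    int main(void)
    {
        char *s = bounded_strdup("cap_net_raw+ep");
        if (s != NULL) {
            printf("%s\n", s);
            free((uint32_t *)s - 1);          /* free from the header start */
        }
        return 0;
    }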
diff --git a/meta/recipes-support/libcap/libcap_2.32.bb b/meta/recipes-support/libcap/libcap_2.32.bb
index d67babb5e9..64d5190aa7 100644
--- a/meta/recipes-support/libcap/libcap_2.32.bb
+++ b/meta/recipes-support/libcap/libcap_2.32.bb
@@ -13,6 +13,8 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/libs/security/linux-privs/${BPN}2/${BPN}-${
file://0001-ensure-the-XATTR_NAME_CAPS-is-defined-when-it-is-use.patch \
file://0002-tests-do-not-run-target-executables.patch \
file://0001-tests-do-not-statically-link-a-test.patch \
+ file://CVE-2023-2602.patch \
+ file://CVE-2023-2603.patch \
"
SRC_URI[md5sum] = "7416119c9fdcfd0e8dd190a432c668e9"
SRC_URI[sha256sum] = "1005e3d227f2340ad1e3360ef8b69d15e3c72a29c09f4894d7aac038bd26e2be"
diff --git a/meta/recipes-support/libksba/libksba/CVE-2022-3515.patch b/meta/recipes-support/libksba/libksba/CVE-2022-3515.patch
new file mode 100644
index 0000000000..ff9f2f9275
--- /dev/null
+++ b/meta/recipes-support/libksba/libksba/CVE-2022-3515.patch
@@ -0,0 +1,47 @@
+From 4b7d9cd4a018898d7714ce06f3faf2626c14582b Mon Sep 17 00:00:00 2001
+From: Werner Koch <wk@gnupg.org>
+Date: Wed, 5 Oct 2022 14:19:06 +0200
+Subject: [PATCH] Detect a possible overflow directly in the TLV parser.
+
+* src/ber-help.c (_ksba_ber_read_tl): Check for overflow of a commonly
+used sum.
+--
+
+It is quite common to have checks like
+
+ if (ti.nhdr + ti.length >= DIM(tmpbuf))
+ return gpg_error (GPG_ERR_TOO_LARGE);
+
+This patch detects possible integer overflows immmediately when
+creating the TI object.
+
+Reported-by: ZDI-CAN-18927, ZDI-CAN-18928, ZDI-CAN-18929
+
+
+Upstream-Status: Backport [https://git.gnupg.org/cgi-bin/gitweb.cgi?p=libksba.git;a=patch;h=4b7d9cd4a018898d7714ce06f3faf2626c14582b]
+CVE: CVE-2022-3515
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/ber-help.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/ber-help.c b/src/ber-help.c
+index 81c31ed..56efb6a 100644
+--- a/src/ber-help.c
++++ b/src/ber-help.c
+@@ -182,6 +182,12 @@ _ksba_ber_read_tl (ksba_reader_t reader, struct tag_info *ti)
+ ti->length = len;
+ }
+
++ if (ti->length > ti->nhdr && (ti->nhdr + ti->length) < ti->length)
++ {
++ ti->err_string = "header+length would overflow";
++ return gpg_error (GPG_ERR_EOVERFLOW);
++ }
++
+ /* Without this kludge some example certs can't be parsed */
+ if (ti->class == CLASS_UNIVERSAL && !ti->tag)
+ ti->length = 0;
+--
+2.11.0
+
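Editor's sketch: the check added above catches the case where ti->nhdr + ti->length wraps around, so that later comparisons such as nhdr + length >= DIM(tmpbuf) can be trusted. The same idea as a standalone helper; types and names are illustrative, not the libksba internals:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* True if a + b would wrap; for unsigned types this is exactly the
     * "sum is smaller than an operand" test the patch relies on. */
    static bool add_would_overflow(size_t a, size_t b)
    {
        return a + b < a;
    }

    /* Typical use before a bounds comparison on header + payload length. */
    static bool tlv_fits(size_t nhdr, size_t length, size_t bufsize)
    {
        if (add_would_overflow(nhdr, length))
            return false;                 /* header+length would overflow */
        return nhdr + length < bufsize;
    }

    int main(void)
    {
        printf("%d\n", tlv_fits(4, 100, 4096));          /* 1 */
        printf("%d\n", tlv_fits(4, (size_t)-2, 4096));   /* 0: would wrap */
        return 0;
    }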
diff --git a/meta/recipes-support/libksba/libksba/CVE-2022-47629.patch b/meta/recipes-support/libksba/libksba/CVE-2022-47629.patch
new file mode 100644
index 0000000000..b09d0eb557
--- /dev/null
+++ b/meta/recipes-support/libksba/libksba/CVE-2022-47629.patch
@@ -0,0 +1,69 @@
+From b17444b3c47e32c77a3ba5335ae30ccbadcba3cf Mon Sep 17 00:00:00 2001
+From: Werner Koch <wk@gnupg.org>
+Date: Tue, 22 Nov 2022 16:36:46 +0100
+Subject: [PATCH] Fix an integer overflow in the CRL signature parser.
+
+* src/crl.c (parse_signature): N+N2 now checked for overflow.
+
+* src/ocsp.c (parse_response_extensions): Do not accept too large
+values.
+(parse_single_extensions): Ditto.
+--
+
+The second patch is an extra safegourd not related to the reported
+bug.
+
+GnuPG-bug-id: 6284
+Reported-by: Joseph Surin, elttam
+CVE: CVE-2022-47629
+https://git.gnupg.org/cgi-bin/gitweb.cgi?p=libksba.git;a=commit;h=f61a5ea4e0f6a80fd4b28ef0174bee77793cf070
+Upstream-Status: Backport
+Signed-off-by: Chee Yang Lee <chee.yang.lee@intel.com>
+---
+ src/crl.c | 2 +-
+ src/ocsp.c | 12 ++++++++++++
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/src/crl.c b/src/crl.c
+index 87a3fa3..9d3028e 100644
+--- a/src/crl.c
++++ b/src/crl.c
+@@ -1434,7 +1434,7 @@ parse_signature (ksba_crl_t crl)
+ && !ti.is_constructed) )
+ return gpg_error (GPG_ERR_INV_CRL_OBJ);
+ n2 = ti.nhdr + ti.length;
+- if (n + n2 >= DIM(tmpbuf))
++ if (n + n2 >= DIM(tmpbuf) || (n + n2) < n)
+ return gpg_error (GPG_ERR_TOO_LARGE);
+ memcpy (tmpbuf+n, ti.buf, ti.nhdr);
+ err = read_buffer (crl->reader, tmpbuf+n+ti.nhdr, ti.length);
+diff --git a/src/ocsp.c b/src/ocsp.c
+index 4b26f8d..c41234e 100644
+--- a/src/ocsp.c
++++ b/src/ocsp.c
+@@ -912,6 +912,12 @@ parse_response_extensions (ksba_ocsp_t ocsp,
+ else
+ ocsp->good_nonce = 1;
+ }
++ if (ti.length > (1<<24))
++ {
++ /* Bail out on much too large objects. */
++ err = gpg_error (GPG_ERR_BAD_BER);
++ goto leave;
++ }
+ ex = xtrymalloc (sizeof *ex + strlen (oid) + ti.length);
+ if (!ex)
+ {
+@@ -979,6 +985,12 @@ parse_single_extensions (struct ocsp_reqitem_s *ri,
+ err = parse_octet_string (&data, &datalen, &ti);
+ if (err)
+ goto leave;
++ if (ti.length > (1<<24))
++ {
++ /* Bail out on much too large objects. */
++ err = gpg_error (GPG_ERR_BAD_BER);
++ goto leave;
++ }
+ ex = xtrymalloc (sizeof *ex + strlen (oid) + ti.length);
+ if (!ex)
+ {
diff --git a/meta/recipes-support/libksba/libksba_1.3.5.bb b/meta/recipes-support/libksba/libksba_1.3.5.bb
index 7f9ab4f5fc..5293aa91e1 100644
--- a/meta/recipes-support/libksba/libksba_1.3.5.bb
+++ b/meta/recipes-support/libksba/libksba_1.3.5.bb
@@ -22,7 +22,10 @@ inherit autotools binconfig-disabled pkgconfig texinfo
UPSTREAM_CHECK_URI = "https://gnupg.org/download/index.html"
SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \
- file://ksba-add-pkgconfig-support.patch"
+ file://ksba-add-pkgconfig-support.patch \
+ file://CVE-2022-47629.patch \
+ file://CVE-2022-3515.patch \
+"
SRC_URI[md5sum] = "8302a3e263a7c630aa7dea7d341f07a2"
SRC_URI[sha256sum] = "41444fd7a6ff73a79ad9728f985e71c9ba8cd3e5e53358e70d5f066d35c1a340"
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586-regression.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586-regression.patch
new file mode 100644
index 0000000000..42ee417fe7
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586-regression.patch
@@ -0,0 +1,30 @@
+From 5d1e62b0155292b994aa1c96d4ed8ce4346ef4c2 Mon Sep 17 00:00:00 2001
+From: Zoltan Herczeg <hzmester@freemail.hu>
+Date: Thu, 24 Mar 2022 05:34:42 +0000
+Subject: [PATCH] Fix incorrect value reading in JIT.
+
+CVE: CVE-2022-1586
+Upstream-Status: Backport [https://github.com/PCRE2Project/pcre2/commit/d4fa336fbcc3]
+
+(cherry picked from commit d4fa336fbcc388f89095b184ba6d99422cfc676c)
+Signed-off-by: Shinu Chandran <shinucha@cisco.com>
+---
+ src/pcre2_jit_compile.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/pcre2_jit_compile.c b/src/pcre2_jit_compile.c
+index 493c96d..fa57942 100644
+--- a/src/pcre2_jit_compile.c
++++ b/src/pcre2_jit_compile.c
+@@ -7188,7 +7188,7 @@ while (*cc != XCL_END)
+ {
+ SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
+ cc++;
+- if (*cc == PT_CLIST && *cc == XCL_PROP)
++ if (*cc == PT_CLIST && cc[-1] == XCL_PROP)
+ {
+ other_cases = PRIV(ucd_caseless_sets) + cc[1];
+ while (*other_cases != NOTACHAR)
+--
+2.25.1
+
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586.patch
new file mode 100644
index 0000000000..fbbbc9ca77
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1586.patch
@@ -0,0 +1,59 @@
+From 233c4248550d0c1d9bfee42198d5ee0855b7d413 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 23 May 2022 13:52:39 +0530
+Subject: [PATCH] CVE-2022-1586
+
+Upstream-Status: Backport from https://github.com/PCRE2Project/pcre2/commit/50a51cb7e67268e6ad417eb07c9de9bfea5cc55a
+
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ ChangeLog | 3 +++
+ src/pcre2_jit_compile.c | 2 +-
+ src/pcre2_jit_test.c | 4 ++++
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/ChangeLog b/ChangeLog
+index 0926c29..b5d72dc 100644
+--- a/ChangeLog
++++ b/ChangeLog
+@@ -1,6 +1,9 @@
+ Change Log for PCRE2
+ --------------------
+
++23. Fixed a unicode properrty matching issue in JIT. The character was not
++fully read in caseless matching.
++
+
+ Version 10.34 21-November-2019
+ ------------------------------
+diff --git a/src/pcre2_jit_compile.c b/src/pcre2_jit_compile.c
+index f564127..5d43865 100644
+--- a/src/pcre2_jit_compile.c
++++ b/src/pcre2_jit_compile.c
+@@ -7119,7 +7119,7 @@ while (*cc != XCL_END)
+ {
+ SLJIT_ASSERT(*cc == XCL_PROP || *cc == XCL_NOTPROP);
+ cc++;
+- if (*cc == PT_CLIST)
++ if (*cc == PT_CLIST && *cc == XCL_PROP)
+ {
+ other_cases = PRIV(ucd_caseless_sets) + cc[1];
+ while (*other_cases != NOTACHAR)
+diff --git a/src/pcre2_jit_test.c b/src/pcre2_jit_test.c
+index a9b3880..9df87fd 100644
+--- a/src/pcre2_jit_test.c
++++ b/src/pcre2_jit_test.c
+@@ -408,6 +408,10 @@ static struct regression_test_case regression_test_cases[] = {
+ { MUP, A, 0, 0 | F_PROPERTY, "[\xc3\xa2-\xc3\xa6\xc3\x81-\xc3\x84\xe2\x80\xa8-\xe2\x80\xa9\xe6\x92\xad\\p{Zs}]{2,}", "\xe2\x80\xa7\xe2\x80\xa9\xe6\x92\xad \xe6\x92\xae" },
+ { MUP, A, 0, 0 | F_PROPERTY, "[\\P{L&}]{2}[^\xc2\x85-\xc2\x89\\p{Ll}\\p{Lu}]{2}", "\xc3\xa9\xe6\x92\xad.a\xe6\x92\xad|\xc2\x8a#" },
+ { PCRE2_UCP, 0, 0, 0 | F_PROPERTY, "[a-b\\s]{2,5}[^a]", "AB baaa" },
++ { MUP, 0, 0, 0 | F_NOMATCH, "[^\\p{Hangul}\\p{Z}]", " " },
++ { MUP, 0, 0, 0, "[\\p{Lu}\\P{Latin}]+", "c\xEA\xA4\xAE,A,b" },
++ { MUP, 0, 0, 0, "[\\x{a92e}\\p{Lu}\\P{Latin}]+", "c\xEA\xA4\xAE,A,b" },
++ { CMUP, 0, 0, 0, "[^S]\\B", "\xe2\x80\x8a" },
+
+ /* Possible empty brackets. */
+ { MU, A, 0, 0, "(?:|ab||bc|a)+d", "abcxabcabd" },
+--
+2.25.1
+
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-1587.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1587.patch
new file mode 100644
index 0000000000..70f9f9f079
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-1587.patch
@@ -0,0 +1,660 @@
+From aa5aac0d209e3debf80fc2db924d9401fc50454b Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Mon, 23 May 2022 14:11:11 +0530
+Subject: [PATCH] CVE-2022-1587
+
+Upstream-Status: Backport [https://github.com/PCRE2Project/pcre2/commit/03654e751e7f0700693526b67dfcadda6b42c9d0]
+CVE: CVE-2022-1587
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+
+---
+ ChangeLog | 3 +
+ src/pcre2_jit_compile.c | 290 ++++++++++++++++++++++++++--------------
+ src/pcre2_jit_test.c | 1 +
+ 3 files changed, 194 insertions(+), 100 deletions(-)
+
+diff --git a/ChangeLog b/ChangeLog
+index b5d72dc..de82de9 100644
+--- a/ChangeLog
++++ b/ChangeLog
+@@ -4,6 +4,9 @@ Change Log for PCRE2
+ 23. Fixed a unicode properrty matching issue in JIT. The character was not
+ fully read in caseless matching.
+
++24. Fixed an issue affecting recursions in JIT caused by duplicated data
++transfers.
++
+
+ Version 10.34 21-November-2019
+ ------------------------------
+diff --git a/src/pcre2_jit_compile.c b/src/pcre2_jit_compile.c
+index 5d43865..493c96d 100644
+--- a/src/pcre2_jit_compile.c
++++ b/src/pcre2_jit_compile.c
+@@ -407,6 +407,9 @@ typedef struct compiler_common {
+ /* Locals used by fast fail optimization. */
+ sljit_s32 fast_fail_start_ptr;
+ sljit_s32 fast_fail_end_ptr;
++ /* Variables used by recursive call generator. */
++ sljit_s32 recurse_bitset_size;
++ uint8_t *recurse_bitset;
+
+ /* Flipped and lower case tables. */
+ const sljit_u8 *fcc;
+@@ -2109,19 +2112,39 @@ for (i = 0; i < RECURSE_TMP_REG_COUNT; i++)
+
+ #undef RECURSE_TMP_REG_COUNT
+
++static BOOL recurse_check_bit(compiler_common *common, sljit_sw bit_index)
++{
++uint8_t *byte;
++uint8_t mask;
++
++SLJIT_ASSERT((bit_index & (sizeof(sljit_sw) - 1)) == 0);
++
++bit_index >>= SLJIT_WORD_SHIFT;
++
++mask = 1 << (bit_index & 0x7);
++byte = common->recurse_bitset + (bit_index >> 3);
++
++if (*byte & mask)
++ return FALSE;
++
++*byte |= mask;
++return TRUE;
++}
++
+ static int get_recurse_data_length(compiler_common *common, PCRE2_SPTR cc, PCRE2_SPTR ccend,
+ BOOL *needs_control_head, BOOL *has_quit, BOOL *has_accept)
+ {
+ int length = 1;
+-int size;
++int size, offset;
+ PCRE2_SPTR alternative;
+ BOOL quit_found = FALSE;
+ BOOL accept_found = FALSE;
+ BOOL setsom_found = FALSE;
+ BOOL setmark_found = FALSE;
+-BOOL capture_last_found = FALSE;
+ BOOL control_head_found = FALSE;
+
++memset(common->recurse_bitset, 0, common->recurse_bitset_size);
++
+ #if defined DEBUG_FORCE_CONTROL_HEAD && DEBUG_FORCE_CONTROL_HEAD
+ SLJIT_ASSERT(common->control_head_ptr != 0);
+ control_head_found = TRUE;
+@@ -2144,15 +2167,17 @@ while (cc < ccend)
+ setsom_found = TRUE;
+ if (common->mark_ptr != 0)
+ setmark_found = TRUE;
+- if (common->capture_last_ptr != 0)
+- capture_last_found = TRUE;
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
++ length++;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_KET:
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0)
+ {
+- length++;
++ if (recurse_check_bit(common, offset))
++ length++;
+ SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0);
+ cc += PRIVATE_DATA(cc + 1);
+ }
+@@ -2169,39 +2194,55 @@ while (cc < ccend)
+ case OP_SBRA:
+ case OP_SBRAPOS:
+ case OP_SCOND:
+- length++;
+ SLJIT_ASSERT(PRIVATE_DATA(cc) != 0);
++ if (recurse_check_bit(common, PRIVATE_DATA(cc)))
++ length++;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_CBRA:
+ case OP_SCBRA:
+- length += 2;
+- if (common->capture_last_ptr != 0)
+- capture_last_found = TRUE;
+- if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0)
++ offset = GET2(cc, 1 + LINK_SIZE);
++ if (recurse_check_bit(common, OVECTOR(offset << 1)))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, OVECTOR((offset << 1) + 1)));
++ length += 2;
++ }
++ if (common->optimized_cbracket[offset] == 0 && recurse_check_bit(common, OVECTOR_PRIV(offset)))
++ length++;
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
+ length++;
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
+
+ case OP_CBRAPOS:
+ case OP_SCBRAPOS:
+- length += 2 + 2;
+- if (common->capture_last_ptr != 0)
+- capture_last_found = TRUE;
++ offset = GET2(cc, 1 + LINK_SIZE);
++ if (recurse_check_bit(common, OVECTOR(offset << 1)))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, OVECTOR((offset << 1) + 1)));
++ length += 2;
++ }
++ if (recurse_check_bit(common, OVECTOR_PRIV(offset)))
++ length++;
++ if (recurse_check_bit(common, PRIVATE_DATA(cc)))
++ length++;
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
++ length++;
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
+
+ case OP_COND:
+ /* Might be a hidden SCOND. */
+ alternative = cc + GET(cc, 1);
+- if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN)
++ if ((*alternative == OP_KETRMAX || *alternative == OP_KETRMIN) && recurse_check_bit(common, PRIVATE_DATA(cc)))
+ length++;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_1
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
+ length++;
+ cc += 2;
+ #ifdef SUPPORT_UNICODE
+@@ -2210,8 +2251,12 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_2A
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, offset + sizeof(sljit_sw)));
+ length += 2;
++ }
+ cc += 2;
+ #ifdef SUPPORT_UNICODE
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+@@ -2219,8 +2264,12 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_2B
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, offset + sizeof(sljit_sw)));
+ length += 2;
++ }
+ cc += 2 + IMM2_SIZE;
+ #ifdef SUPPORT_UNICODE
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+@@ -2228,20 +2277,29 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_1
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
+ length++;
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2A
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, offset + sizeof(sljit_sw)));
+ length += 2;
++ }
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2B
+- if (PRIVATE_DATA(cc) != 0)
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
++ {
++ SLJIT_ASSERT(recurse_check_bit(common, offset + sizeof(sljit_sw)));
+ length += 2;
++ }
+ cc += 1 + IMM2_SIZE;
+ break;
+
+@@ -2253,7 +2311,9 @@ while (cc < ccend)
+ #else
+ size = 1 + 32 / (int)sizeof(PCRE2_UCHAR);
+ #endif
+- if (PRIVATE_DATA(cc) != 0)
++
++ offset = PRIVATE_DATA(cc);
++ if (offset != 0 && recurse_check_bit(common, offset))
+ length += get_class_iterator_size(cc + size);
+ cc += size;
+ break;
+@@ -2288,8 +2348,7 @@ while (cc < ccend)
+ case OP_THEN:
+ SLJIT_ASSERT(common->control_head_ptr != 0);
+ quit_found = TRUE;
+- if (!control_head_found)
+- control_head_found = TRUE;
++ control_head_found = TRUE;
+ cc++;
+ break;
+
+@@ -2309,8 +2368,6 @@ SLJIT_ASSERT(cc == ccend);
+
+ if (control_head_found)
+ length++;
+-if (capture_last_found)
+- length++;
+ if (quit_found)
+ {
+ if (setsom_found)
+@@ -2343,14 +2400,12 @@ sljit_sw shared_srcw[3];
+ sljit_sw kept_shared_srcw[2];
+ int private_count, shared_count, kept_shared_count;
+ int from_sp, base_reg, offset, i;
+-BOOL setsom_found = FALSE;
+-BOOL setmark_found = FALSE;
+-BOOL capture_last_found = FALSE;
+-BOOL control_head_found = FALSE;
++
++memset(common->recurse_bitset, 0, common->recurse_bitset_size);
+
+ #if defined DEBUG_FORCE_CONTROL_HEAD && DEBUG_FORCE_CONTROL_HEAD
+ SLJIT_ASSERT(common->control_head_ptr != 0);
+-control_head_found = TRUE;
++recurse_check_bit(common, common->control_head_ptr);
+ #endif
+
+ switch (type)
+@@ -2438,11 +2493,10 @@ while (cc < ccend)
+ {
+ case OP_SET_SOM:
+ SLJIT_ASSERT(common->has_set_som);
+- if (has_quit && !setsom_found)
++ if (has_quit && recurse_check_bit(common, OVECTOR(0)))
+ {
+ kept_shared_srcw[0] = OVECTOR(0);
+ kept_shared_count = 1;
+- setsom_found = TRUE;
+ }
+ cc += 1;
+ break;
+@@ -2450,33 +2504,31 @@ while (cc < ccend)
+ case OP_RECURSE:
+ if (has_quit)
+ {
+- if (common->has_set_som && !setsom_found)
++ if (common->has_set_som && recurse_check_bit(common, OVECTOR(0)))
+ {
+ kept_shared_srcw[0] = OVECTOR(0);
+ kept_shared_count = 1;
+- setsom_found = TRUE;
+ }
+- if (common->mark_ptr != 0 && !setmark_found)
++ if (common->mark_ptr != 0 && recurse_check_bit(common, common->mark_ptr))
+ {
+ kept_shared_srcw[kept_shared_count] = common->mark_ptr;
+ kept_shared_count++;
+- setmark_found = TRUE;
+ }
+ }
+- if (common->capture_last_ptr != 0 && !capture_last_found)
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
+ {
+ shared_srcw[0] = common->capture_last_ptr;
+ shared_count = 1;
+- capture_last_found = TRUE;
+ }
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_KET:
+- if (PRIVATE_DATA(cc) != 0)
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0)
+ {
+- private_count = 1;
+- private_srcw[0] = PRIVATE_DATA(cc);
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
+ SLJIT_ASSERT(PRIVATE_DATA(cc + 1) != 0);
+ cc += PRIVATE_DATA(cc + 1);
+ }
+@@ -2493,50 +2545,66 @@ while (cc < ccend)
+ case OP_SBRA:
+ case OP_SBRAPOS:
+ case OP_SCOND:
+- private_count = 1;
+ private_srcw[0] = PRIVATE_DATA(cc);
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
+ cc += 1 + LINK_SIZE;
+ break;
+
+ case OP_CBRA:
+ case OP_SCBRA:
+- offset = (GET2(cc, 1 + LINK_SIZE)) << 1;
+- shared_srcw[0] = OVECTOR(offset);
+- shared_srcw[1] = OVECTOR(offset + 1);
+- shared_count = 2;
++ offset = GET2(cc, 1 + LINK_SIZE);
++ shared_srcw[0] = OVECTOR(offset << 1);
++ if (recurse_check_bit(common, shared_srcw[0]))
++ {
++ shared_srcw[1] = shared_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, shared_srcw[1]));
++ shared_count = 2;
++ }
+
+- if (common->capture_last_ptr != 0 && !capture_last_found)
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
+ {
+- shared_srcw[2] = common->capture_last_ptr;
+- shared_count = 3;
+- capture_last_found = TRUE;
++ shared_srcw[shared_count] = common->capture_last_ptr;
++ shared_count++;
+ }
+
+- if (common->optimized_cbracket[GET2(cc, 1 + LINK_SIZE)] == 0)
++ if (common->optimized_cbracket[offset] == 0)
+ {
+- private_count = 1;
+- private_srcw[0] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE));
++ private_srcw[0] = OVECTOR_PRIV(offset);
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
+ }
++
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
+
+ case OP_CBRAPOS:
+ case OP_SCBRAPOS:
+- offset = (GET2(cc, 1 + LINK_SIZE)) << 1;
+- shared_srcw[0] = OVECTOR(offset);
+- shared_srcw[1] = OVECTOR(offset + 1);
+- shared_count = 2;
++ offset = GET2(cc, 1 + LINK_SIZE);
++ shared_srcw[0] = OVECTOR(offset << 1);
++ if (recurse_check_bit(common, shared_srcw[0]))
++ {
++ shared_srcw[1] = shared_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, shared_srcw[1]));
++ shared_count = 2;
++ }
+
+- if (common->capture_last_ptr != 0 && !capture_last_found)
++ if (common->capture_last_ptr != 0 && recurse_check_bit(common, common->capture_last_ptr))
+ {
+- shared_srcw[2] = common->capture_last_ptr;
+- shared_count = 3;
+- capture_last_found = TRUE;
++ shared_srcw[shared_count] = common->capture_last_ptr;
++ shared_count++;
+ }
+
+- private_count = 2;
+ private_srcw[0] = PRIVATE_DATA(cc);
+- private_srcw[1] = OVECTOR_PRIV(GET2(cc, 1 + LINK_SIZE));
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
++
++ offset = OVECTOR_PRIV(offset);
++ if (recurse_check_bit(common, offset))
++ {
++ private_srcw[private_count] = offset;
++ private_count++;
++ }
+ cc += 1 + LINK_SIZE + IMM2_SIZE;
+ break;
+
+@@ -2545,18 +2613,17 @@ while (cc < ccend)
+ alternative = cc + GET(cc, 1);
+ if (*alternative == OP_KETRMAX || *alternative == OP_KETRMIN)
+ {
+- private_count = 1;
+ private_srcw[0] = PRIVATE_DATA(cc);
++ if (recurse_check_bit(common, private_srcw[0]))
++ private_count = 1;
+ }
+ cc += 1 + LINK_SIZE;
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_1
+- if (PRIVATE_DATA(cc))
+- {
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ private_count = 1;
+- private_srcw[0] = PRIVATE_DATA(cc);
+- }
+ cc += 2;
+ #ifdef SUPPORT_UNICODE
+ if (common->utf && HAS_EXTRALEN(cc[-1])) cc += GET_EXTRALEN(cc[-1]);
+@@ -2564,11 +2631,12 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_2A
+- if (PRIVATE_DATA(cc))
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ {
+ private_count = 2;
+- private_srcw[0] = PRIVATE_DATA(cc);
+- private_srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw);
++ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
+ }
+ cc += 2;
+ #ifdef SUPPORT_UNICODE
+@@ -2577,11 +2645,12 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_PRIVATE_DATA_2B
+- if (PRIVATE_DATA(cc))
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ {
+ private_count = 2;
+- private_srcw[0] = PRIVATE_DATA(cc);
+- private_srcw[1] = PRIVATE_DATA(cc) + sizeof(sljit_sw);
++ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
+ }
+ cc += 2 + IMM2_SIZE;
+ #ifdef SUPPORT_UNICODE
+@@ -2590,30 +2659,30 @@ while (cc < ccend)
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_1
+- if (PRIVATE_DATA(cc))
+- {
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ private_count = 1;
+- private_srcw[0] = PRIVATE_DATA(cc);
+- }
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2A
+- if (PRIVATE_DATA(cc))
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ {
+ private_count = 2;
+- private_srcw[0] = PRIVATE_DATA(cc);
+ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
+ }
+ cc += 1;
+ break;
+
+ CASE_ITERATOR_TYPE_PRIVATE_DATA_2B
+- if (PRIVATE_DATA(cc))
++ private_srcw[0] = PRIVATE_DATA(cc);
++ if (private_srcw[0] != 0 && recurse_check_bit(common, private_srcw[0]))
+ {
+ private_count = 2;
+- private_srcw[0] = PRIVATE_DATA(cc);
+ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
+ }
+ cc += 1 + IMM2_SIZE;
+ break;
+@@ -2630,14 +2699,17 @@ while (cc < ccend)
+ switch(get_class_iterator_size(cc + i))
+ {
+ case 1:
+- private_count = 1;
+ private_srcw[0] = PRIVATE_DATA(cc);
+ break;
+
+ case 2:
+- private_count = 2;
+ private_srcw[0] = PRIVATE_DATA(cc);
+- private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ if (recurse_check_bit(common, private_srcw[0]))
++ {
++ private_count = 2;
++ private_srcw[1] = private_srcw[0] + sizeof(sljit_sw);
++ SLJIT_ASSERT(recurse_check_bit(common, private_srcw[1]));
++ }
+ break;
+
+ default:
+@@ -2652,28 +2724,25 @@ while (cc < ccend)
+ case OP_PRUNE_ARG:
+ case OP_THEN_ARG:
+ SLJIT_ASSERT(common->mark_ptr != 0);
+- if (has_quit && !setmark_found)
++ if (has_quit && recurse_check_bit(common, common->mark_ptr))
+ {
+ kept_shared_srcw[0] = common->mark_ptr;
+ kept_shared_count = 1;
+- setmark_found = TRUE;
+ }
+- if (common->control_head_ptr != 0 && !control_head_found)
++ if (common->control_head_ptr != 0 && recurse_check_bit(common, common->control_head_ptr))
+ {
+ shared_srcw[0] = common->control_head_ptr;
+ shared_count = 1;
+- control_head_found = TRUE;
+ }
+ cc += 1 + 2 + cc[1];
+ break;
+
+ case OP_THEN:
+ SLJIT_ASSERT(common->control_head_ptr != 0);
+- if (!control_head_found)
++ if (recurse_check_bit(common, common->control_head_ptr))
+ {
+ shared_srcw[0] = common->control_head_ptr;
+ shared_count = 1;
+- control_head_found = TRUE;
+ }
+ cc++;
+ break;
+@@ -2681,7 +2750,7 @@ while (cc < ccend)
+ default:
+ cc = next_opcode(common, cc);
+ SLJIT_ASSERT(cc != NULL);
+- break;
++ continue;
+ }
+
+ if (type != recurse_copy_shared_to_global && type != recurse_copy_kept_shared_to_global)
+@@ -13262,7 +13331,7 @@ SLJIT_ASSERT(!(common->req_char_ptr != 0 && common->start_used_ptr != 0));
+ common->cbra_ptr = OVECTOR_START + (re->top_bracket + 1) * 2 * sizeof(sljit_sw);
+
+ total_length = ccend - common->start;
+-common->private_data_ptrs = (sljit_s32 *)SLJIT_MALLOC(total_length * (sizeof(sljit_s32) + (common->has_then ? 1 : 0)), allocator_data);
++common->private_data_ptrs = (sljit_s32*)SLJIT_MALLOC(total_length * (sizeof(sljit_s32) + (common->has_then ? 1 : 0)), allocator_data);
+ if (!common->private_data_ptrs)
+ {
+ SLJIT_FREE(common->optimized_cbracket, allocator_data);
+@@ -13304,6 +13373,7 @@ if (!compiler)
+ common->compiler = compiler;
+
+ /* Main pcre_jit_exec entry. */
++SLJIT_ASSERT((private_data_size & (sizeof(sljit_sw) - 1)) == 0);
+ sljit_emit_enter(compiler, 0, SLJIT_ARG1(SW), 5, 5, 0, 0, private_data_size);
+
+ /* Register init. */
+@@ -13524,20 +13594,40 @@ common->fast_fail_end_ptr = 0;
+ common->currententry = common->entries;
+ common->local_quit_available = TRUE;
+ quit_label = common->quit_label;
+-while (common->currententry != NULL)
++if (common->currententry != NULL)
+ {
+- /* Might add new entries. */
+- compile_recurse(common);
+- if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler)))
++ /* A free bit for each private data. */
++ common->recurse_bitset_size = ((private_data_size / (int)sizeof(sljit_sw)) + 7) >> 3;
++ SLJIT_ASSERT(common->recurse_bitset_size > 0);
++ common->recurse_bitset = (sljit_u8*)SLJIT_MALLOC(common->recurse_bitset_size, allocator_data);;
++
++ if (common->recurse_bitset != NULL)
++ {
++ do
++ {
++ /* Might add new entries. */
++ compile_recurse(common);
++ if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler)))
++ break;
++ flush_stubs(common);
++ common->currententry = common->currententry->next;
++ }
++ while (common->currententry != NULL);
++
++ SLJIT_FREE(common->recurse_bitset, allocator_data);
++ }
++
++ if (common->currententry != NULL)
+ {
++ /* The common->recurse_bitset has been freed. */
++ SLJIT_ASSERT(sljit_get_compiler_error(compiler) || common->recurse_bitset == NULL);
++
+ sljit_free_compiler(compiler);
+ SLJIT_FREE(common->optimized_cbracket, allocator_data);
+ SLJIT_FREE(common->private_data_ptrs, allocator_data);
+ PRIV(jit_free_rodata)(common->read_only_data_head, allocator_data);
+ return PCRE2_ERROR_NOMEMORY;
+ }
+- flush_stubs(common);
+- common->currententry = common->currententry->next;
+ }
+ common->local_quit_available = FALSE;
+ common->quit_label = quit_label;
+diff --git a/src/pcre2_jit_test.c b/src/pcre2_jit_test.c
+index 9df87fd..2f84834 100644
+--- a/src/pcre2_jit_test.c
++++ b/src/pcre2_jit_test.c
+@@ -746,6 +746,7 @@ static struct regression_test_case regression_test_cases[] = {
+ { MU, A, 0, 0, "((?(R)a|(?1)){1,3}?)M", "aaaM" },
+ { MU, A, 0, 0, "((.)(?:.|\\2(?1))){0}#(?1)#", "#aabbccdde# #aabbccddee#" },
+ { MU, A, 0, 0, "((.)(?:\\2|\\2{4}b)){0}#(?:(?1))+#", "#aaaab# #aaaaab#" },
++ { MU, A, 0, 0 | F_NOMATCH, "(?1)$((.|\\2xx){1,2})", "abc" },
+
+ /* 16 bit specific tests. */
+ { CM, A, 0, 0 | F_FORCECONV, "\xc3\xa1", "\xc3\x81\xc3\xa1" },
+--
+2.25.1
+
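Editor's sketch: the core of the CVE-2022-1587 backport is recurse_check_bit(), a bitset keyed on the private-data offset so that each slot is transferred at most once per recursion. The test-and-set idiom it uses can be shown in isolation; the code below uses generic names and omits the SLJIT_WORD_SHIFT scaling the real function applies to its offsets:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SLOT_COUNT 256

    /* One bit per private-data slot; generic names, not the pcre2 internals. */
    struct seen_set {
        uint8_t bits[(SLOT_COUNT + 7) / 8];
    };

    static void seen_init(struct seen_set *s)
    {
        memset(s->bits, 0, sizeof(s->bits));
    }

    /* Returns true only the first time an index is seen, so the caller emits
     * one data transfer per slot no matter how many opcodes reference it. */
    static bool seen_check_and_set(struct seen_set *s, unsigned idx)
    {
        uint8_t mask = (uint8_t)(1u << (idx & 0x7));
        uint8_t *byte = &s->bits[idx >> 3];

        if (*byte & mask)
            return false;   /* duplicate: skip */
        *byte |= mask;
        return true;        /* first occurrence: process it */
    }

    int main(void)
    {
        struct seen_set s;
        seen_init(&s);
        printf("%d %d\n", seen_check_and_set(&s, 42),
                          seen_check_and_set(&s, 42));   /* prints "1 0" */
        return 0;
    }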
diff --git a/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch b/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch
new file mode 100644
index 0000000000..882277ae73
--- /dev/null
+++ b/meta/recipes-support/libpcre/libpcre2/CVE-2022-41409.patch
@@ -0,0 +1,74 @@
+From 94e1c001761373b7d9450768aa15d04c25547a35 Mon Sep 17 00:00:00 2001
+From: Philip Hazel <Philip.Hazel@gmail.com>
+Date: Tue, 16 Aug 2022 17:00:45 +0100
+Subject: [PATCH] Diagnose negative repeat value in pcre2test subject line
+
+CVE: CVE-2022-41409
+Upstream-Status: Backport [https://github.com/PCRE2Project/pcre2/commit/94e1c001761373b7d9450768aa15d04c25547a35]
+
+Signed-off-by: Peter Marko <peter.marko@siemens.com>
+
+---
+ ChangeLog | 3 +++
+ src/pcre2test.c | 4 ++--
+ testdata/testinput2 | 3 +++
+ testdata/testoutput2 | 4 ++++
+ 4 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/ChangeLog b/ChangeLog
+index eab50eb7..276eb57a 100644
+--- a/ChangeLog
++++ b/ChangeLog
+@@ -7,6 +7,9 @@ fully read in caseless matching.
+ 24. Fixed an issue affecting recursions in JIT caused by duplicated data
+ transfers.
+
++20. A negative repeat value in a pcre2test subject line was not being
++diagnosed, leading to infinite looping.
++
+
+ Version 10.34 21-November-2019
+ ------------------------------
+diff --git a/src/pcre2test.c b/src/pcre2test.c
+index 08f86096..f6f5d66c 100644
+--- a/src/pcre2test.c
++++ b/src/pcre2test.c
+@@ -6700,9 +6700,9 @@ while ((c = *p++) != 0)
+ }
+
+ i = (int32_t)li;
+- if (i-- == 0)
++ if (i-- <= 0)
+ {
+- fprintf(outfile, "** Zero repeat not allowed\n");
++ fprintf(outfile, "** Zero or negative repeat not allowed\n");
+ return PR_OK;
+ }
+
+diff --git a/testdata/testinput2 b/testdata/testinput2
+index 655e519..14e00ed 100644
+--- a/testdata/testinput2
++++ b/testdata/testinput2
+@@ -5772,4 +5772,7 @@ a)"xI
+ /(a)?a/I
+ manm
+
++--
++ \[X]{-10}
++
+ # End of testinput2
+diff --git a/testdata/testoutput2 b/testdata/testoutput2
+index c733c12..958f246 100644
+--- a/testdata/testoutput2
++++ b/testdata/testoutput2
+@@ -17435,6 +17435,10 @@ Subject length lower bound = 1
+ manm
+ 0: a
+
++--
++ \[X]{-10}
++** Zero or negative repeat not allowed
++
+ # End of testinput2
+ Error -70: PCRE2_ERROR_BADDATA (unknown error number)
+ Error -62: bad serialized data
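Editor's sketch: the pcre2test fix widens the guard from "== 0" to "<= 0" because the repeat count is parsed as a signed value; a negative count previously slipped past the check and the post-decrement loop never reached zero again. A compact illustration of the same guard (generic code, not pcre2test itself):

    #include <stdio.h>

    /* Run an action 'count' times. The guard must be "<= 0": with the old
     * "== 0" test a negative count skipped it, and the post-decrement then
     * moved further away from zero, so the loop below never terminated. */
    static int repeat_action(int count)
    {
        int i = count;
        if (i-- <= 0) {
            fprintf(stderr, "** Zero or negative repeat not allowed\n");
            return -1;
        }
        while (1) {
            /* one iteration of the action would go here */
            if (i-- == 0)
                break;
        }
        return 0;
    }

    int main(void)
    {
        printf("repeat 3  -> %d\n", repeat_action(3));    /* 0: ran 3 times */
        printf("repeat -5 -> %d\n", repeat_action(-5));   /* -1: rejected */
        return 0;
    }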
diff --git a/meta/recipes-support/libpcre/libpcre2_10.34.bb b/meta/recipes-support/libpcre/libpcre2_10.34.bb
index f2c36944d8..53277270d2 100644
--- a/meta/recipes-support/libpcre/libpcre2_10.34.bb
+++ b/meta/recipes-support/libpcre/libpcre2_10.34.bb
@@ -12,6 +12,10 @@ LIC_FILES_CHKSUM = "file://LICENCE;md5=b1588d3bb4cb0e1f5a597d908f8c5b37"
SRC_URI = "http://downloads.yoctoproject.org/mirror/sources/pcre2-${PV}.tar.bz2 \
file://pcre-cross.patch \
+ file://CVE-2022-1586.patch \
+ file://CVE-2022-1586-regression.patch \
+ file://CVE-2022-1587.patch \
+ file://CVE-2022-41409.patch \
"
SRC_URI[md5sum] = "d280b62ded13f9ccf2fac16ee5286366"
diff --git a/meta/recipes-support/libxslt/libxslt/CVE-2021-30560.patch b/meta/recipes-support/libxslt/libxslt/CVE-2021-30560.patch
new file mode 100644
index 0000000000..614047ea7a
--- /dev/null
+++ b/meta/recipes-support/libxslt/libxslt/CVE-2021-30560.patch
@@ -0,0 +1,201 @@
+From 50f9c9cd3b7dfe9b3c8c795247752d1fdcadcac8 Mon Sep 17 00:00:00 2001
+From: Nick Wellnhofer <wellnhofer@aevum.de>
+Date: Sat, 12 Jun 2021 20:02:53 +0200
+Subject: [PATCH] Fix use-after-free in xsltApplyTemplates
+
+xsltApplyTemplates without a select expression could delete nodes in
+the source document.
+
+1. Text nodes with strippable whitespace
+
+Whitespace from input documents is already stripped, so there's no
+need to strip it again. Under certain circumstances, xsltApplyTemplates
+could be fooled into deleting text nodes that are still referenced,
+resulting in a use-after-free.
+
+2. The DTD
+
+The DTD was only unlinked, but there's no good reason to do this just
+now. Maybe it was meant as a micro-optimization.
+
+3. Unknown nodes
+
+Useless and dangerous as well, especially with XInclude nodes.
+See https://gitlab.gnome.org/GNOME/libxml2/-/issues/268
+
+Simply stop trying to uselessly delete nodes when applying a template.
+This part of the code is probably a leftover from a time where
+xsltApplyStripSpaces wasn't implemented yet. Also note that
+xsltApplyTemplates with a select expression never tried to delete
+nodes.
+
+Also stop xsltDefaultProcessOneNode from deleting nodes for the same
+reasons.
+
+This fixes CVE-2021-30560.
+
+CVE: CVE-2021-30560
+Upstream-Status: Backport [https://github.com/GNOME/libxslt/commit/50f9c9cd3b7dfe9b3c8c795247752d1fdcadcac8.patch]
+Comment: No change in any hunk
+Signed-off-by: Omkar Patil <Omkar.Patil@kpit.com>
+
+---
+ libxslt/transform.c | 119 +++-----------------------------------------
+ 1 file changed, 7 insertions(+), 112 deletions(-)
+
+diff --git a/libxslt/transform.c b/libxslt/transform.c
+index 04522154..3aba354f 100644
+--- a/libxslt/transform.c
++++ b/libxslt/transform.c
+@@ -1895,7 +1895,7 @@ static void
+ xsltDefaultProcessOneNode(xsltTransformContextPtr ctxt, xmlNodePtr node,
+ xsltStackElemPtr params) {
+ xmlNodePtr copy;
+- xmlNodePtr delete = NULL, cur;
++ xmlNodePtr cur;
+ int nbchild = 0, oldSize;
+ int childno = 0, oldPos;
+ xsltTemplatePtr template;
+@@ -1968,54 +1968,13 @@ xsltDefaultProcessOneNode(xsltTransformContextPtr ctxt, xmlNodePtr node,
+ return;
+ }
+ /*
+- * Handling of Elements: first pass, cleanup and counting
++ * Handling of Elements: first pass, counting
+ */
+ cur = node->children;
+ while (cur != NULL) {
+- switch (cur->type) {
+- case XML_TEXT_NODE:
+- case XML_CDATA_SECTION_NODE:
+- case XML_DOCUMENT_NODE:
+- case XML_HTML_DOCUMENT_NODE:
+- case XML_ELEMENT_NODE:
+- case XML_PI_NODE:
+- case XML_COMMENT_NODE:
+- nbchild++;
+- break;
+- case XML_DTD_NODE:
+- /* Unlink the DTD, it's still reachable using doc->intSubset */
+- if (cur->next != NULL)
+- cur->next->prev = cur->prev;
+- if (cur->prev != NULL)
+- cur->prev->next = cur->next;
+- break;
+- default:
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltDefaultProcessOneNode: skipping node type %d\n",
+- cur->type));
+-#endif
+- delete = cur;
+- }
++ if (IS_XSLT_REAL_NODE(cur))
++ nbchild++;
+ cur = cur->next;
+- if (delete != NULL) {
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltDefaultProcessOneNode: removing ignorable blank node\n"));
+-#endif
+- xmlUnlinkNode(delete);
+- xmlFreeNode(delete);
+- delete = NULL;
+- }
+- }
+- if (delete != NULL) {
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_PROCESS_NODE,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltDefaultProcessOneNode: removing ignorable blank node\n"));
+-#endif
+- xmlUnlinkNode(delete);
+- xmlFreeNode(delete);
+- delete = NULL;
+ }
+
+ /*
+@@ -4864,7 +4823,7 @@ xsltApplyTemplates(xsltTransformContextPtr ctxt, xmlNodePtr node,
+ xsltStylePreCompPtr comp = (xsltStylePreCompPtr) castedComp;
+ #endif
+ int i;
+- xmlNodePtr cur, delNode = NULL, oldContextNode;
++ xmlNodePtr cur, oldContextNode;
+ xmlNodeSetPtr list = NULL, oldList;
+ xsltStackElemPtr withParams = NULL;
+ int oldXPProximityPosition, oldXPContextSize;
+@@ -4998,73 +4957,9 @@ xsltApplyTemplates(xsltTransformContextPtr ctxt, xmlNodePtr node,
+ else
+ cur = NULL;
+ while (cur != NULL) {
+- switch (cur->type) {
+- case XML_TEXT_NODE:
+- if ((IS_BLANK_NODE(cur)) &&
+- (cur->parent != NULL) &&
+- (cur->parent->type == XML_ELEMENT_NODE) &&
+- (ctxt->style->stripSpaces != NULL)) {
+- const xmlChar *val;
+-
+- if (cur->parent->ns != NULL) {
+- val = (const xmlChar *)
+- xmlHashLookup2(ctxt->style->stripSpaces,
+- cur->parent->name,
+- cur->parent->ns->href);
+- if (val == NULL) {
+- val = (const xmlChar *)
+- xmlHashLookup2(ctxt->style->stripSpaces,
+- BAD_CAST "*",
+- cur->parent->ns->href);
+- }
+- } else {
+- val = (const xmlChar *)
+- xmlHashLookup2(ctxt->style->stripSpaces,
+- cur->parent->name, NULL);
+- }
+- if ((val != NULL) &&
+- (xmlStrEqual(val, (xmlChar *) "strip"))) {
+- delNode = cur;
+- break;
+- }
+- }
+- /* Intentional fall-through */
+- case XML_ELEMENT_NODE:
+- case XML_DOCUMENT_NODE:
+- case XML_HTML_DOCUMENT_NODE:
+- case XML_CDATA_SECTION_NODE:
+- case XML_PI_NODE:
+- case XML_COMMENT_NODE:
+- xmlXPathNodeSetAddUnique(list, cur);
+- break;
+- case XML_DTD_NODE:
+- /* Unlink the DTD, it's still reachable
+- * using doc->intSubset */
+- if (cur->next != NULL)
+- cur->next->prev = cur->prev;
+- if (cur->prev != NULL)
+- cur->prev->next = cur->next;
+- break;
+- case XML_NAMESPACE_DECL:
+- break;
+- default:
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_APPLY_TEMPLATES,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltApplyTemplates: skipping cur type %d\n",
+- cur->type));
+-#endif
+- delNode = cur;
+- }
++ if (IS_XSLT_REAL_NODE(cur))
++ xmlXPathNodeSetAddUnique(list, cur);
+ cur = cur->next;
+- if (delNode != NULL) {
+-#ifdef WITH_XSLT_DEBUG_PROCESS
+- XSLT_TRACE(ctxt,XSLT_TRACE_APPLY_TEMPLATES,xsltGenericDebug(xsltGenericDebugContext,
+- "xsltApplyTemplates: removing ignorable blank cur\n"));
+-#endif
+- xmlUnlinkNode(delNode);
+- xmlFreeNode(delNode);
+- delNode = NULL;
+- }
+ }
+ }
+
diff --git a/meta/recipes-support/libxslt/libxslt_1.1.34.bb b/meta/recipes-support/libxslt/libxslt_1.1.34.bb
index 63cce6fe06..4755677bec 100644
--- a/meta/recipes-support/libxslt/libxslt_1.1.34.bb
+++ b/meta/recipes-support/libxslt/libxslt_1.1.34.bb
@@ -14,6 +14,7 @@ SECTION = "libs"
DEPENDS = "libxml2"
SRC_URI = "http://xmlsoft.org/sources/libxslt-${PV}.tar.gz \
+ file://CVE-2021-30560.patch \
"
SRC_URI[md5sum] = "db8765c8d076f1b6caafd9f2542a304a"
@@ -21,6 +22,10 @@ SRC_URI[sha256sum] = "98b1bd46d6792925ad2dfe9a87452ea2adebf69dcb9919ffd55bf926a7
UPSTREAM_CHECK_REGEX = "libxslt-(?P<pver>\d+(\.\d+)+)\.tar"
+# We have libxml2 2.9.10 and we don't link statically with it anyway
+# so this isn't an issue.
+CVE_CHECK_WHITELIST += "CVE-2022-29824"
+
S = "${WORKDIR}/libxslt-${PV}"
BINCONFIG = "${bindir}/xslt-config"
diff --git a/meta/recipes-support/lz4/lz4_1.9.2.bb b/meta/recipes-support/lz4/lz4_1.9.2.bb
index 0c4a0ac807..bc11a57eb5 100644
--- a/meta/recipes-support/lz4/lz4_1.9.2.bb
+++ b/meta/recipes-support/lz4/lz4_1.9.2.bb
@@ -12,6 +12,10 @@ PE = "1"
SRCREV = "fdf2ef5809ca875c454510610764d9125ef2ebbd"
+# remove at next version upgrade or when output changes
+PR = "r1"
+HASHEQUIV_HASH_VERSION .= ".1"
+
SRC_URI = "git://github.com/lz4/lz4.git;branch=dev;protocol=https \
file://run-ptest \
file://CVE-2021-3520.patch \
@@ -23,7 +27,7 @@ S = "${WORKDIR}/git"
# Fixed in r118, which is larger than the current version.
CVE_CHECK_WHITELIST += "CVE-2014-4715"
-EXTRA_OEMAKE = "PREFIX=${prefix} CC='${CC}' DESTDIR=${D} LIBDIR=${libdir} INCLUDEDIR=${includedir} BUILD_STATIC=no"
+EXTRA_OEMAKE = "PREFIX=${prefix} CC='${CC}' CFLAGS='${CFLAGS}' DESTDIR=${D} LIBDIR=${libdir} INCLUDEDIR=${includedir} BUILD_STATIC=no"
do_install() {
oe_runmake install
diff --git a/meta/recipes-support/sqlite/files/CVE-2020-35525.patch b/meta/recipes-support/sqlite/files/CVE-2020-35525.patch
new file mode 100644
index 0000000000..27d81d42d9
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2020-35525.patch
@@ -0,0 +1,21 @@
+From: drh <drh@noemail.net>
+Date: Thu, 20 Feb 2020 14:08:51 +0000
+Subject: [PATCH] Early-out on the INTERSECT query processing following an
+ error.
+
+Upstream-Status: Backport [http://security.debian.org/debian-security/pool/updates/main/s/sqlite3/sqlite3_3.27.2-3+deb10u2.debian.tar.xz]
+CVE: CVE-2020-35525
+Signed-off-by: Virendra Thakur <virendrak@kpit.com>
+---
+Index: sqlite-autoconf-3310100/sqlite3.c
+===================================================================
+--- sqlite-autoconf-3310100.orig/sqlite3.c
++++ sqlite-autoconf-3310100/sqlite3.c
+@@ -130767,6 +130767,7 @@ static int multiSelect(
+ /* Generate code to take the intersection of the two temporary
+ ** tables.
+ */
++ if( rc ) break;
+ assert( p->pEList );
+ iBreak = sqlite3VdbeMakeLabel(pParse);
+ iCont = sqlite3VdbeMakeLabel(pParse);
diff --git a/meta/recipes-support/sqlite/files/CVE-2020-35527.patch b/meta/recipes-support/sqlite/files/CVE-2020-35527.patch
new file mode 100644
index 0000000000..d1dae389b0
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2020-35527.patch
@@ -0,0 +1,22 @@
+From: dan <dan@noemail.net>
+Date: Mon, 26 Oct 2020 13:24:36 +0000
+Subject: [PATCH] Fix a problem with ALTER TABLE for views that have a nested
+ FROM clause. Ticket [f50af3e8a565776b].
+
+Upstream-Status: Backport [http://security.debian.org/debian-security/pool/updates/main/s/sqlite3/sqlite3_3.27.2-3+deb10u2.debian.tar.xz]
+CVE: CVE-2020-35527
+Signed-off-by: Virendra Thakur <virendra.thakur@kpit.com>
+---
+Index: sqlite-autoconf-3310100/sqlite3.c
+===================================================================
+--- sqlite-autoconf-3310100.orig/sqlite3.c
++++ sqlite-autoconf-3310100/sqlite3.c
+@@ -133110,7 +133110,7 @@ static int selectExpander(Walker *pWalke
+ pNew = sqlite3ExprListAppend(pParse, pNew, pExpr);
+ sqlite3TokenInit(&sColname, zColname);
+ sqlite3ExprListSetName(pParse, pNew, &sColname, 0);
+- if( pNew && (p->selFlags & SF_NestedFrom)!=0 ){
++ if( pNew && (p->selFlags & SF_NestedFrom)!=0 && !IN_RENAME_OBJECT ){
+ struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];
+ sqlite3DbFree(db, pX->zEName);
+ if( pSub ){
diff --git a/meta/recipes-support/sqlite/files/CVE-2021-20223.patch b/meta/recipes-support/sqlite/files/CVE-2021-20223.patch
new file mode 100644
index 0000000000..e9d2e04d30
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2021-20223.patch
@@ -0,0 +1,23 @@
+From d1d43efa4fb0f2098c0e2c5bf2e807c58d5ec05b Mon Sep 17 00:00:00 2001
+From: dan <dan@noemail.net>
+Date: Mon, 26 Oct 2020 13:24:36 +0000
+Subject: [PATCH] Prevent fts5 tokenizer unicode61 from considering '\0' to be
+ a token character, even if other characters of class "Cc" are.
+
+FossilOrigin-Name: b7b7bde9b7a03665e3691c6d51118965f216d2dfb1617f138b9f9e60e418ed2f
+
+CVE: CVE-2021-20223
+Upstream-Status: Backport [https://github.com/sqlite/sqlite/commit/d1d43efa4fb0f2098c0e2c5bf2e807c58d5ec05b.patch]
+Comment: Removed manifest, manifest.uuid and fts5tok1.test as these files are not present in the amalgamated source code
+Signed-off-by: Sana.Kazi@kpit.com
+---
+--- a/sqlite3.c 2022-09-09 13:54:30.010768197 +0530
++++ b/sqlite3.c 2022-09-09 13:56:25.458769142 +0530
+@@ -227114,6 +227114,7 @@
+ }
+ iTbl++;
+ }
++ aAscii[0] = 0; /* 0x00 is never a token character */
+ }
+
+ /*
diff --git a/meta/recipes-support/sqlite/files/CVE-2022-35737.patch b/meta/recipes-support/sqlite/files/CVE-2022-35737.patch
new file mode 100644
index 0000000000..341e002913
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2022-35737.patch
@@ -0,0 +1,29 @@
+From 2bbf4c999dbb4b520561a57e0bafc19a15562093 Mon Sep 17 00:00:00 2001
+From: Hitendra Prajapati <hprajapati@mvista.com>
+Date: Fri, 2 Sep 2022 11:22:29 +0530
+Subject: [PATCH] CVE-2022-35737
+
+Upstream-Status: Backport [https://www.sqlite.org/src/info/aab790a16e1bdff7]
+CVE: CVE-2022-35737
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ sqlite3.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index f664217..33dfb78 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -28758,7 +28758,8 @@ SQLITE_API void sqlite3_str_vappendf(
+ case etSQLESCAPE: /* %q: Escape ' characters */
+ case etSQLESCAPE2: /* %Q: Escape ' and enclose in '...' */
+ case etSQLESCAPE3: { /* %w: Escape " characters */
+- int i, j, k, n, isnull;
++ i64 i, j, k, n;
++ int isnull;
+ int needQuote;
+ char ch;
+ char q = ((xtype==etSQLESCAPE3)?'"':'\''); /* Quote character */
+--
+2.25.1
+
diff --git a/meta/recipes-support/sqlite/files/CVE-2023-7104.patch b/meta/recipes-support/sqlite/files/CVE-2023-7104.patch
new file mode 100644
index 0000000000..01ff29ff5e
--- /dev/null
+++ b/meta/recipes-support/sqlite/files/CVE-2023-7104.patch
@@ -0,0 +1,46 @@
+From eab426c5fba69d2c77023939f72b4ad446834e3c Mon Sep 17 00:00:00 2001
+From: dan <Dan Kennedy>
+Date: Thu, 7 Sep 2023 13:53:09 +0000
+Subject: [PATCH] Fix a buffer overread in the sessions extension that could occur when processing a corrupt changeset.
+
+Upstream-Status: Backport [https://sqlite.org/src/info/0e4e7a05c4204b47]
+CVE: CVE-2023-7104
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ sqlite3.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/sqlite3.c b/sqlite3.c
+index 972ef18..c645ac8 100644
+--- a/sqlite3.c
++++ b/sqlite3.c
+@@ -203301,15 +203301,19 @@ static int sessionReadRecord(
+ }
+ }
+ if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+- sqlite3_int64 v = sessionGetI64(aVal);
+- if( eType==SQLITE_INTEGER ){
+- sqlite3VdbeMemSetInt64(apOut[i], v);
++ if( (pIn->nData-pIn->iNext)<8 ){
++ rc = SQLITE_CORRUPT_BKPT;
+ }else{
+- double d;
+- memcpy(&d, &v, 8);
+- sqlite3VdbeMemSetDouble(apOut[i], d);
++ sqlite3_int64 v = sessionGetI64(aVal);
++ if( eType==SQLITE_INTEGER ){
++ sqlite3VdbeMemSetInt64(apOut[i], v);
++ }else{
++ double d;
++ memcpy(&d, &v, 8);
++ sqlite3VdbeMemSetDouble(apOut[i], d);
++ }
++ pIn->iNext += 8;
+ }
+- pIn->iNext += 8;
+ }
+ }
+ }
+--
+2.25.1
+
diff --git a/meta/recipes-support/sqlite/sqlite3_3.31.1.bb b/meta/recipes-support/sqlite/sqlite3_3.31.1.bb
index 877e80f5a3..0e7bcfa5a7 100644
--- a/meta/recipes-support/sqlite/sqlite3_3.31.1.bb
+++ b/meta/recipes-support/sqlite/sqlite3_3.31.1.bb
@@ -13,6 +13,11 @@ SRC_URI = "http://www.sqlite.org/2020/sqlite-autoconf-${SQLITE_PV}.tar.gz \
file://CVE-2020-13630.patch \
file://CVE-2020-13631.patch \
file://CVE-2020-13632.patch \
+ file://CVE-2022-35737.patch \
+ file://CVE-2020-35525.patch \
+ file://CVE-2020-35527.patch \
+ file://CVE-2021-20223.patch \
+ file://CVE-2023-7104.patch \
"
SRC_URI[md5sum] = "2d0a553534c521504e3ac3ad3b90f125"
SRC_URI[sha256sum] = "62284efebc05a76f909c580ffa5c008a7d22a1287285d68b7825a2b6b51949ae"
diff --git a/meta/recipes-support/vim/files/racefix.patch b/meta/recipes-support/vim/files/racefix.patch
deleted file mode 100644
index 1cb8fb442f..0000000000
--- a/meta/recipes-support/vim/files/racefix.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-The creation of the LINGUAS file is duplicated for each desktop file
-which can lead the commands to race against each other. Rework
-the makefile to avoid this as the expense of leaving the file on disk.
-
-Upstream-Status: Pending
-RP 2021/2/15
-
-Index: git/src/po/Makefile
-===================================================================
---- git.orig/src/po/Makefile
-+++ git/src/po/Makefile
-@@ -207,17 +207,16 @@ $(PACKAGE).pot: $(PO_INPUTLIST) $(PO_VIM
- # Delete the temporary files
- rm *.js
-
--vim.desktop: vim.desktop.in $(POFILES)
-+LINGUAS:
- echo $(LANGUAGES) | tr " " "\n" |sed -e '/\./d' | sort > LINGUAS
-+
-+vim.desktop: vim.desktop.in $(POFILES) LINGUAS
- $(MSGFMT) --desktop -d . --template vim.desktop.in -o tmp_vim.desktop
-- rm -f LINGUAS
- if command -v desktop-file-validate; then desktop-file-validate tmp_vim.desktop; fi
- mv tmp_vim.desktop vim.desktop
-
--gvim.desktop: gvim.desktop.in $(POFILES)
-- echo $(LANGUAGES) | tr " " "\n" |sed -e '/\./d' | sort > LINGUAS
-+gvim.desktop: gvim.desktop.in $(POFILES) LINGUAS
- $(MSGFMT) --desktop -d . --template gvim.desktop.in -o tmp_gvim.desktop
-- rm -f LINGUAS
- if command -v desktop-file-validate; then desktop-file-validate tmp_gvim.desktop; fi
- mv tmp_gvim.desktop gvim.desktop
-
diff --git a/meta/recipes-support/vim/vim-tiny_8.2.bb b/meta/recipes-support/vim/vim-tiny_9.0.bb
index e4c26d23f6..e4c26d23f6 100644
--- a/meta/recipes-support/vim/vim-tiny_8.2.bb
+++ b/meta/recipes-support/vim/vim-tiny_9.0.bb
diff --git a/meta/recipes-support/vim/vim.inc b/meta/recipes-support/vim/vim.inc
index 5f01fc3bca..6d62bd67af 100644
--- a/meta/recipes-support/vim/vim.inc
+++ b/meta/recipes-support/vim/vim.inc
@@ -10,31 +10,28 @@ DEPENDS = "ncurses gettext-native"
RSUGGESTS_${PN} = "diffutils"
LICENSE = "vim"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=6b30ea4fa660c483b619924bc709ef99 \
- file://runtime/doc/uganda.txt;md5=daf48235bb824c77fe8ae88d5f575f74"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d1a651ab770b45d41c0f8cb5a8ca930e"
SRC_URI = "git://github.com/vim/vim.git;branch=master;protocol=https \
file://disable_acl_header_check.patch \
file://vim-add-knob-whether-elf.h-are-checked.patch \
file://0001-src-Makefile-improve-reproducibility.patch \
file://no-path-adjust.patch \
- file://racefix.patch \
"
-PV .= ".4681"
-SRCREV = "15f74fab653a784548d5d966644926b47ba2cfa7"
-
-# Remove when 8.3 is out
-UPSTREAM_VERSION_UNKNOWN = "1"
+PV .= ".2190"
+SRCREV = "6a950da86d7a6eb09d5ebeab17657986420d07ac"
# Do not consider .z in x.y.z, as that is updated with every commit
UPSTREAM_CHECK_GITTAGREGEX = "(?P<pver>\d+\.\d+)\.0"
+# Ignore that the upstream version .z in x.y.z is always newer
+UPSTREAM_VERSION_UNKNOWN = "1"
S = "${WORKDIR}/git"
VIMDIR = "vim${@d.getVar('PV').split('.')[0]}${@d.getVar('PV').split('.')[1]}"
-inherit autotools-brokensep update-alternatives mime-xdg
+inherit autotools-brokensep update-alternatives mime-xdg pkgconfig
CLEANBROKEN = "1"
@@ -43,22 +40,18 @@ do_configure () {
cd src
rm -f auto/*
touch auto/config.mk
+ # git timestamps aren't reliable, so touch the shipped .po files so they aren't regenerated
+ touch -c po/cs.cp1250.po po/ja.euc-jp.po po/ja.sjis.po po/ko.po po/pl.UTF-8.po po/pl.cp1250.po po/ru.cp1251.po po/sk.cp1250.po po/uk.cp1251.po po/zh_CN.po po/zh_CN.cp936.po po/zh_TW.po
+ # ru.cp1251.po uses CP1251 rather than cp1251, fix that
+ sed -i -e s/CP1251/cp1251/ po/ru.cp1251.po
aclocal
autoconf
cd ..
oe_runconf
touch src/auto/configure
touch src/auto/config.mk src/auto/config.h
-}
-
-do_compile() {
- # We do not support fully / correctly the following locales. Attempting
- # to use these with msgfmt in order to update the ".desktop" files exposes
- # this problem and leads to the compile failing.
- for LOCALE in cs fr ko pl sk zh_CN zh_TW;do
- echo -n > src/po/${LOCALE}.po
- done
- autotools_do_compile
+ # need a native tool, not a target one
+ ${BUILD_CC} src/po/sjiscorr.c -o src/po/sjiscorr
}
PACKAGECONFIG ??= "\
@@ -82,6 +75,7 @@ EXTRA_OECONF = " \
--disable-netbeans \
--disable-desktop-database-update \
--with-tlib=ncurses \
+ --with-modified-by='${MAINTAINER}' \
ac_cv_small_wchar_t=no \
ac_cv_path_GLIB_COMPILE_RESOURCES=no \
vim_cv_getcwd_broken=no \
@@ -94,6 +88,11 @@ EXTRA_OECONF = " \
STRIP=/bin/true \
"
+# Some host distros don't have it, disable consistently
+# also disable on dunfell target builds
+EXTRA_OECONF_append_class-native = " vim_cv_timer_create=no"
+EXTRA_OECONF_append_class-target = " vim_cv_timer_create=no"
+
do_install() {
autotools_do_install
diff --git a/meta/recipes-support/vim/vim_8.2.bb b/meta/recipes-support/vim/vim_9.0.bb
index 709b6ddb55..709b6ddb55 100644
--- a/meta/recipes-support/vim/vim_8.2.bb
+++ b/meta/recipes-support/vim/vim_9.0.bb
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
index 8eefcf63a5..2f91a355b0 100755
--- a/scripts/create-pull-request
+++ b/scripts/create-pull-request
@@ -128,7 +128,7 @@ PROTO_RE="[a-z][a-z+]*://"
GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)"
REMOTE_URL=${REMOTE_URL%.git}
REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#")
-REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#")
+REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#https://\4/\5#")
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
diff --git a/scripts/git b/scripts/git
index 8adf5c9ecb..644055e540 100755
--- a/scripts/git
+++ b/scripts/git
@@ -10,7 +10,14 @@ os.environ['PSEUDO_UNLOAD'] = '1'
# calculate path to the real 'git'
path = os.environ['PATH']
-path = path.replace(os.path.dirname(sys.argv[0]), '')
+# we need to remove our path but also any other copy of this script which
+# may be present, e.g. eSDK.
+replacements = [os.path.dirname(sys.argv[0])]
+for p in path.split(":"):
+ if p.endswith("/scripts"):
+ replacements.append(p)
+for r in replacements:
+ path = path.replace(r, '/ignoreme')
real_git = shutil.which('git', path=path)
if len(sys.argv) == 1:
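
The hunk above maps every scripts directory on PATH to '/ignoreme' so that shutil.which() can only resolve the real git binary. A minimal sketch, using hypothetical paths rather than ones from a real build, of what the rewrite does:

    # Hypothetical PATH containing the poky wrapper and an eSDK copy of it
    path = "/work/poky/scripts:/work/esdk/layers/poky/scripts:/usr/bin"
    replacements = ["/work/poky/scripts"]        # os.path.dirname(sys.argv[0])
    for p in path.split(":"):
        if p.endswith("/scripts"):
            replacements.append(p)
    for r in replacements:
        path = path.replace(r, "/ignoreme")
    print(path)  # /ignoreme:/ignoreme:/usr/bin -> shutil.which('git', path=path) skips both wrappers
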
diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py
index c69b5bf4d7..3b76286ba5 100644
--- a/scripts/lib/buildstats.py
+++ b/scripts/lib/buildstats.py
@@ -8,7 +8,7 @@ import json
import logging
import os
import re
-from collections import namedtuple,OrderedDict
+from collections import namedtuple
from statistics import mean
@@ -238,7 +238,7 @@ class BuildStats(dict):
subdirs = os.listdir(path)
for dirname in subdirs:
recipe_dir = os.path.join(path, dirname)
- if not os.path.isdir(recipe_dir):
+ if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = cls.split_nevr(dirname)
bsrecipe = BSRecipe(name, epoch, version, revision)
diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py
index e0f8e64b9c..b4f9fbfe45 100644
--- a/scripts/lib/devtool/deploy.py
+++ b/scripts/lib/devtool/deploy.py
@@ -201,9 +201,9 @@ def deploy(args, config, basepath, workspace):
print(' %s' % item)
return 0
- extraoptions = ''
+ extraoptions = '-o HostKeyAlgorithms=+ssh-rsa'
if args.no_host_check:
- extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ extraoptions += ' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
if not args.show_status:
extraoptions += ' -q'
@@ -274,9 +274,9 @@ def undeploy(args, config, basepath, workspace):
elif not args.recipename and not args.all:
raise argparse_oe.ArgumentUsageError('If you don\'t specify a recipe, you must specify -a/--all', 'undeploy-target')
- extraoptions = ''
+ extraoptions = '-o HostKeyAlgorithms=+ssh-rsa'
if args.no_host_check:
- extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ extraoptions += ' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
if not args.show_status:
extraoptions += ' -q'
diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py
index 95384c5333..ff9227035d 100644
--- a/scripts/lib/devtool/menuconfig.py
+++ b/scripts/lib/devtool/menuconfig.py
@@ -43,7 +43,7 @@ def menuconfig(args, config, basepath, workspace):
return 1
check_workspace_recipe(workspace, args.component)
- pn = rd.getVar('PN', True)
+ pn = rd.getVar('PN')
if not rd.getVarFlag('do_menuconfig','task'):
raise DevtoolError("This recipe does not support menuconfig option")
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index f364a45283..cfa88616af 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -357,7 +357,7 @@ def _move_file(src, dst, dry_run_outdir=None, base_outdir=None):
bb.utils.mkdirhier(dst_d)
shutil.move(src, dst)
-def _copy_file(src, dst, dry_run_outdir=None):
+def _copy_file(src, dst, dry_run_outdir=None, base_outdir=None):
"""Copy a file. Creates all the directory components of destination path."""
dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
logger.debug('Copying %s to %s%s' % (src, dst, dry_run_suffix))
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
index 798cb0cefe..a2c6d052a6 100644
--- a/scripts/lib/recipetool/create.py
+++ b/scripts/lib/recipetool/create.py
@@ -745,6 +745,10 @@ def create_recipe(args):
for handler in handlers:
handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
+ # native and nativesdk classes are special and must be inherited last
+ # If present, put them at the end of the classes list
+ classes.sort(key=lambda c: c in ("native", "nativesdk"))
+
extrafiles = extravalues.pop('extrafiles', {})
extra_pn = extravalues.pop('PN', None)
extra_pv = extravalues.pop('PV', None)
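
The sort above relies on the key being False (0) for ordinary classes and True (1) for native/nativesdk, and on Python's sort being stable, so those two classes move to the end without disturbing the relative order of the rest. A small illustration with a hypothetical class list:

    classes = ["setuptools3", "native", "pkgconfig"]
    classes.sort(key=lambda c: c in ("native", "nativesdk"))
    print(classes)  # ['setuptools3', 'pkgconfig', 'native']
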
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index f0ca50ebe2..a349510ab8 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -176,7 +176,10 @@ class ResultsTextReport(object):
vals['sort'] = line['testseries'] + "_" + line['result_id']
vals['failed_testcases'] = line['failed_testcases']
for k in cols:
- vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ if total_tested:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ else:
+ vals[k] = "0 (0%)"
for k in maxlen:
if k in vals and len(vals[k]) > maxlen[k]:
maxlen[k] = len(vals[k])
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index 8917022d36..c5521d81bd 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -58,7 +58,11 @@ def append_resultsdata(results, f, configmap=store_map, configvars=extra_configv
testseries = posixpath.basename(posixpath.dirname(url.path))
else:
with open(f, "r") as filedata:
- data = json.load(filedata)
+ try:
+ data = json.load(filedata)
+ except json.decoder.JSONDecodeError:
+ print("Cannot decode {}. Possible corruption. Skipping.".format(f))
+ data = ""
testseries = os.path.basename(os.path.dirname(f))
else:
data = f
@@ -142,7 +146,7 @@ def generic_get_log(sectionname, results, section):
return decode_log(ptest['log'])
def ptestresult_get_log(results, section):
- return generic_get_log('ptestresuls.sections', results, section)
+ return generic_get_log('ptestresult.sections', results, section)
def generic_get_rawlogs(sectname, results):
if sectname not in results:
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
index 2505c13fce..42704d1e10 100644
--- a/scripts/lib/wic/plugins/imager/direct.py
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -115,7 +115,7 @@ class DirectPlugin(ImagerPlugin):
updated = False
for part in self.parts:
if not part.realnum or not part.mountpoint \
- or part.mountpoint == "/" or not part.mountpoint.startswith('/'):
+ or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"):
continue
if part.use_uuid:
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
index 2cfdc10ecd..05e8471116 100644
--- a/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -277,6 +277,13 @@ class BootimgEFIPlugin(SourcePlugin):
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, part.mountpoint, blocks)
+ # required for compatibility with certain devices expecting file system
+ # block count to be equal to partition block count
+ if blocks < part.fixed_size:
+ blocks = part.fixed_size
+ logger.debug("Overriding %s to %d total blocks for compatibility",
+ part.mountpoint, blocks)
+
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
diff --git a/scripts/nativesdk-intercept/chgrp b/scripts/nativesdk-intercept/chgrp
new file mode 100755
index 0000000000..30cc417d3a
--- /dev/null
+++ b/scripts/nativesdk-intercept/chgrp
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chgrp' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chgrp'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chgrp = shutil.which('chgrp', path=path)
+
+args = [real_chgrp]  # keep a program name as argv[0] for the real chgrp
+
+found = False
+for i in sys.argv[1:]:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chgrp, args)
diff --git a/scripts/nativesdk-intercept/chown b/scripts/nativesdk-intercept/chown
new file mode 100755
index 0000000000..3914b3e384
--- /dev/null
+++ b/scripts/nativesdk-intercept/chown
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chown' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chown'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chown = shutil.which('chown', path=path)
+
+args = [real_chown]  # keep a program name as argv[0] for the real chown
+
+found = False
+for i in sys.argv[1:]:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root:root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chown, args)
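
These wrappers keep options and path arguments intact and only swap the first bare operand, the ownership spec, for root. A worked example of the rewrite, mirroring the loop above on a hypothetical invocation rather than running the wrapper itself:

    argv = ["chown", "-R", "builder:builder", "/opt/sdk/sysroots"]  # hypothetical call
    args, found = [argv[0]], False
    for i in argv[1:]:
        if i.startswith("-"):
            args.append(i)
        elif not found:
            args.append("root:root")   # ownership operand is forced to root
            found = True
        else:
            args.append(i)
    print(args)  # ['chown', '-R', 'root:root', '/opt/sdk/sysroots']
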
diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot
index 5eb3e12769..1c2d51c6ec 100755
--- a/scripts/oe-depends-dot
+++ b/scripts/oe-depends-dot
@@ -15,7 +15,7 @@ class Dot(object):
def __init__(self):
parser = argparse.ArgumentParser(
description="Analyse recipe-depends.dot generated by bitbake -g",
- epilog="Use %(prog)s --help to get help")
+ formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("dotfile",
help = "Specify the dotfile", nargs = 1, action='store', default='')
parser.add_argument("-k", "--key",
@@ -32,6 +32,21 @@ class Dot(object):
" For example, A->B, B->C, A->C, then A->C can be removed.",
action="store_true", default=False)
+ parser.epilog = """
+Examples:
+First generate the .dot file:
+ bitbake -g core-image-minimal
+
+To find out why a package is being built:
+ %(prog)s -k <package> -w ./task-depends.dot
+
+To find out what a package depends on:
+ %(prog)s -k <package> -d ./task-depends.dot
+
+Reduce the .dot file to packages only, no tasks:
+ %(prog)s -r ./task-depends.dot
+"""
+
self.args = parser.parse_args()
if len(sys.argv) != 3 and len(sys.argv) < 5:
@@ -99,6 +114,10 @@ class Dot(object):
if key == "meta-world-pkgdata":
continue
dep = m.group(2)
+ key = key.split('.')[0]
+ dep = dep.split('.')[0]
+ if key == dep:
+ continue
if key in depends:
if not key in depends[key]:
depends[key].add(dep)
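
Node names in a task-depends.dot graph typically look like recipe.do_task, so stripping everything after the first dot collapses tasks onto their recipe and drops intra-recipe edges. A tiny illustration with hypothetical node names:

    key, dep = "curl.do_compile", "curl.do_fetch"    # hypothetical task-depends.dot edge
    key, dep = key.split('.')[0], dep.split('.')[0]  # both become "curl"
    # key == dep, so this self-edge is skipped; only recipe-to-recipe
    # dependencies remain in the reduced graph.
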
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py
index b42dac6b88..9d6787ec5a 100644
--- a/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -128,7 +128,7 @@ class Trace:
def compile(self, writer):
def find_parent_id_for(pid):
- if pid is 0:
+ if pid == 0:
return 0
ppid = self.parent_map.get(pid)
if ppid:
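
"is" tests object identity rather than value; comparing an integer with "is" only works by accident of CPython's small-int caching and triggers a SyntaxWarning on Python 3.8 and later, hence the switch to "==" above. A minimal demonstration:

    a = 1000000
    b = int("1000000")
    print(a == b)  # True  - same value
    print(a is b)  # False - distinct objects; identity is not value equality
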
diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py
index 8c0fdb986a..8079d13750 100755
--- a/scripts/relocate_sdk.py
+++ b/scripts/relocate_sdk.py
@@ -97,11 +97,12 @@ def change_interpreter(elf_file_name):
if (len(new_dl_path) >= p_filesz):
print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
% (elf_file_name, p_memsz, len(new_dl_path) + 1))
- break
+ return False
dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path))
f.seek(p_offset)
f.write(dl_path)
break
+ return True
def change_dl_sysdirs(elf_file_name):
if arch == 32:
@@ -215,6 +216,7 @@ else:
executables_list = sys.argv[3:]
+errors = False
for e in executables_list:
perms = os.stat(e)[stat.ST_MODE]
if os.access(e, os.W_OK|os.R_OK):
@@ -240,7 +242,8 @@ for e in executables_list:
arch = get_arch()
if arch:
parse_elf_header()
- change_interpreter(e)
+ if not change_interpreter(e):
+ errors = True
change_dl_sysdirs(e)
""" change permissions back """
@@ -253,3 +256,6 @@ for e in executables_list:
print("New file size for %s is different. Looks like a relocation error!", e)
sys.exit(-1)
+if errors:
+ print("Relocation of one or more executables failed.")
+ sys.exit(-1)
diff --git a/scripts/runqemu b/scripts/runqemu
index 51607f10e5..4dfc0e2d38 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -974,17 +974,14 @@ class BaseConfig(object):
else:
self.nfs_server = '192.168.7.1'
- # Figure out a new nfs_instance to allow multiple qemus running.
- ps = subprocess.check_output(("ps", "auxww")).decode('utf-8')
- pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
- all_instances = re.findall(pattern, ps, re.M)
- if all_instances:
- all_instances.sort(key=int)
- self.nfs_instance = int(all_instances.pop()) + 1
-
- nfsd_port = 3049 + 2 * self.nfs_instance
- mountd_port = 3048 + 2 * self.nfs_instance
+ nfsd_port = 3048 + self.nfs_instance
+ lockdir = "/tmp/qemu-port-locks"
+ self.make_lock_dir(lockdir)
+ while not self.check_free_port('localhost', nfsd_port, lockdir):
+ self.nfs_instance += 1
+ nfsd_port += 1
+ mountd_port = nfsd_port
# Export vars for runqemu-export-rootfs
export_dict = {
'NFS_INSTANCE': self.nfs_instance,
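
check_free_port() is an existing runqemu helper not shown in this hunk; the idea is to probe whether anything already listens on the candidate port and to record a lock under lockdir so concurrent runqemu instances don't pick the same port. A rough sketch of the probe only, as an assumption about the approach rather than the actual implementation:

    import socket

    def port_is_free(host, port):
        # Bind test: if the bind succeeds the port is currently unused.
        # runqemu additionally takes a lock file under /tmp/qemu-port-locks.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind((host, port))
                return True
            except OSError:
                return False
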
@@ -1034,6 +1031,17 @@ class BaseConfig(object):
self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
+ def make_lock_dir(self, lockdir):
+ if not os.path.exists(lockdir):
+ # There might be a race issue when multiple runqemu processes are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+ return
+
def setup_slirp(self):
"""Setup user networking"""
@@ -1052,14 +1060,7 @@ class BaseConfig(object):
mac = 2
lockdir = "/tmp/qemu-port-locks"
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
# Find a free port to avoid conflicts
for p in ports[:]:
@@ -1099,14 +1100,7 @@ class BaseConfig(object):
logger.error("ip: %s" % ip)
raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
cmd = (ip, 'link')
logger.debug('Running %s...' % str(cmd))
@@ -1423,13 +1417,13 @@ class BaseConfig(object):
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
self.release_taplock()
- self.release_portlock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
cmd = ("runqemu-export-rootfs", "stop", self.rootfs)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
+ self.release_portlock()
if self.saved_stty:
subprocess.check_call(("stty", self.saved_stty))
diff --git a/scripts/wic b/scripts/wic
index 6547abe0e9..99a8a97ccb 100755
--- a/scripts/wic
+++ b/scripts/wic
@@ -206,7 +206,7 @@ def wic_create_subcommand(options, usage_str):
logger.info(" (Please check that the build artifacts for the machine")
logger.info(" selected in local.conf actually exist and that they")
logger.info(" are the correct artifacts for the image (.wks file)).\n")
- raise WicError("The artifact that couldn't be found was %s:\n %s", not_found, not_found_dir)
+ raise WicError("The artifact that couldn't be found was %s:\n %s" % (not_found, not_found_dir))
krootfs_dir = options.rootfs_dir
if krootfs_dir is None: